'''Lazy import structure for the Table Transformer model (transformers-style __init__).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
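# Usage note (illustrative, not from the original file): because the module
# object is swapped for a _LazyModule in sys.modules, a statement like
# `from transformers.models.table_transformer import TableTransformerModel`
# only imports the torch-backed code on first attribute access, keeping the
# top-level `import transformers` cheap when torch is unavailable.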
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    '''
    Classic fourth-order Runge-Kutta method: numerically solves the ODE
    y' = f(x, y) with y(x0) = y0, stepping with step size h until x_end.
    Returns the array of y values computed at each step.
    '''
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
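    # Example sketch (assumed, not from the original module): integrate y' = y
    # from x = 0 to 5 with y(0) = 1; the last entry approximates e^5 ≈ 148.41.
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
    print(ys[-1])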
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
# fmt: off
lowerCAmelCase__ : List[str] = {"""input_ids""": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameter: flattening of the reference ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
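    # Illustrative call (coordinates assumed, not from the original module):
    # print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 40.712778, -74.005833))
    # -> roughly 4.1e6 metres between San Francisco and New York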
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validates that the payload is a readable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
__UpperCAmelCase = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. Printable bytes map
    to themselves; the remaining bytes are shifted to unused code points above
    2**8 so the reversible BPE vocabulary avoids whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
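# Quick sanity check (illustrative, not part of the original file):
# get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}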
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
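# Usage sketch (checkpoint name assumed, not part of the original file):
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=pil_image, text=["a photo of a cat"], return_tensors="pt")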
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by mapping each channel value through a
    linear contrast function centred on mid-grey (128)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
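# Worked example of the factor (illustrative): for level = 170,
# factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so mid-grey (128) stays fixed while values away from 128 are pushed outward.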
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """
    The inverse of DDIM: walks a clean sample forward along the deterministic
    DDIM trajectory, which is useful for image inversion.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
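# Minimal usage sketch (assumed names/values, not part of the original file):
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample          # hypothetical UNet call
#         latents = scheduler.step(noise_pred, t, latents).prev_sample
#
# Each step runs the DDIM update "in reverse", mapping a clean latent towards
# the model's noise distribution (useful for image inversion and editing).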
# Digit-sum sequence solver: a(1) = 1 and a(i + 1) = a(i) + digitsum(a(i));
# solution(n) returns a(n). Digits are stored little-endian in a list.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # split a_i as b * 10^k + c; ds_b is digitsum(b), c is the low-order part
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # add addend into the digit list starting at position k, propagating carries
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Find a(n), where a(1) = 1 and a(i + 1) = a(i) + digitsum(a(i))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        _diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
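# The underlying recurrence, for reference: starting from a(1) = 1 and applying
# a(i + 1) = a(i) + digitsum(a(i)), the sequence begins
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...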
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
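# A closed-form alternative (a sketch, not part of the original solution):
# by inclusion-exclusion, the sum of multiples of 3 or 5 below n equals
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) / 2 and m = (n - 1) // k.
def solution_closed_form(n: int = 1000) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)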
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
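# A minimal shape check for the module above (a sketch under assumed
# dimensions, not part of the original file; running it requires the relative
# diffusers imports above to resolve):
if __name__ == "__main__":
    _model = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=1280
    )
    _hidden, _time_emb = _model(
        image_embeddings=torch.randn(2, 768),
        prompt_embeds=torch.randn(2, 768),
        text_encoder_hidden_states=torch.randn(2, 77, 768),
        do_classifier_free_guidance=False,
    )
    # 4 extra CLIP context tokens are prepended to the 77 projected text tokens
    assert _hidden.shape == (2, 4 + 77, 1280) and _time_emb.shape == (2, 1536)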
| 184 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients are stored from the lowest degree term to the highest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
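# Usage sketch for the class above (added for illustration; not in the
# original file):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients from lowest to highest degree
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 17
    print(p.derivative())  # 6x + 2
    print(p.integral())    # 1.0x^3 + 1.0x^2 + 1.0x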
| 440 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
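# Round-trip sanity check for the normalizer above (a sketch; the relative
# diffusers imports must resolve for this to run):
if __name__ == "__main__":
    _normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    _embeds = torch.randn(2, 768)
    # with the initial mean=0 / std=1 parameters, scale followed by unscale is the identity
    assert torch.allclose(_normalizer.unscale(_normalizer.scale(_embeds)), _embeds, atol=1e-5)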
| 440 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=True , A_=False , A_=False , A_=False , A_=2 , A_=99 , A_=0 , A_=32 , A_=5 , A_=4 , A_=0.1 , A_=0.1 , A_=512 , A_=12 , A_=2 , A_=0.0_2 , A_=3 , A_=4 , A_="last" , A_=None , A_=None , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def __snake_case ( self ) -> Dict:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __snake_case ( self ) -> int:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Union[str, Any]:
lowerCAmelCase = FlaubertModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , lengths=A_ , langs=A_ )
lowerCAmelCase = model(A_ , langs=A_ )
lowerCAmelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> str:
lowerCAmelCase = FlaubertWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
lowerCAmelCase = model(A_ , start_positions=A_ , end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[str]:
lowerCAmelCase = FlaubertForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
lowerCAmelCase = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
lowerCAmelCase = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
((lowerCAmelCase), ) = result_with_labels.to_tuple()
lowerCAmelCase = model(A_ , start_positions=A_ , end_positions=A_ )
((lowerCAmelCase), ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Tuple:
lowerCAmelCase = FlaubertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
lowerCAmelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> int:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
), (
lowerCAmelCase
),
) = config_and_inputs
lowerCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase : List[str] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __snake_case ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __snake_case ( self , A_ , A_ , A_=False ) -> int:
lowerCAmelCase = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def __snake_case ( self ) -> Dict:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ , emb_dim=37 )
def __snake_case ( self ) -> str:
self.config_tester.run_common_tests()
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*A_ )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*A_ )
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*A_ )
@slow
def __snake_case ( self ) -> List[str]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@slow
@require_torch_gpu
def __snake_case ( self ) -> List[str]:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ )
lowerCAmelCase = torch.jit.trace(
A_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A_ , os.path.join(A_ , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(A_ , """traced_model.pt""" ) , map_location=A_ )
loaded(inputs_dict["""input_ids"""].to(A_ ) , inputs_dict["""attention_mask"""].to(A_ ) )
@require_torch
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ) -> str:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(A_ )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A_ )
lowerCAmelCase = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
| 344 |
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 344 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : List[Any] = XGLMConfig
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Dict = '''gelu'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int=14 , _UpperCAmelCase : List[Any]=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=99 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : int=4 , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : int=512 , _UpperCAmelCase : List[str]=0.02 , ):
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_labels
_A = vocab_size
_A = d_model
_A = num_hidden_layers
_A = num_attention_heads
_A = ffn_dim
_A = activation_function
_A = activation_dropout
_A = attention_dropout
_A = max_position_embeddings
_A = initializer_range
_A = None
_A = 0
_A = 2
_A = 1
def lowerCAmelCase_ ( self : Any ):
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def lowerCAmelCase_ ( self : str ):
_A = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = self.get_config()
_A = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase_ ( self : Dict ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Any = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCAmelCase : str = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : int = False
def lowerCAmelCase_ ( self : List[Any] ):
_A = TFXGLMModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase_ ( self : List[Any] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFXGLMModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def lowerCAmelCase_ ( self : List[str] ):
super().test_resize_token_embeddings()
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : str=True ):
_A = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_A = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_A = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
_A = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_A = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_A = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_A = tokenizer('Today is a nice day and' , return_tensors='tf' )
_A = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_A = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , seed=[7, 0] )
_A = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCAmelCase )
_A = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_A = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_A = 'left'
# use different length sentences to test batching
_A = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_A = tokenizer(_UpperCAmelCase , return_tensors='tf' , padding=_UpperCAmelCase )
_A = inputs['input_ids']
_A = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
_A = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_A = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=12 )
_A = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_A = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=12 )
_A = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase )
_A = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
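# Side note (illustrative, not part of the test): decoder-only models such as
# XGLM need left padding for batched generation so that the final position of
# every row is a real token, which is what the batching test above configures:
#
#   tokenizer.padding_side = "left"
#   inputs = tokenizer(sentences, return_tensors="tf", padding=True)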
| 7 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase (__A ):
'''simple docstring'''
_UpperCamelCase : Tuple = 'gpt_neo'
_UpperCamelCase : Optional[Any] = ['past_key_values']
_UpperCamelCase : int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , snake_case_=50_257 , snake_case_=2_048 , snake_case_=2_048 , snake_case_=24 , snake_case_=[[["global", "local"], 12]] , snake_case_=16 , snake_case_=None , snake_case_=256 , snake_case_="gelu_new" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_=True , snake_case_=50_256 , snake_case_=50_256 , **snake_case_ , ):
'''simple docstring'''
A__ : int = vocab_size
A__ : Optional[Any] = max_position_embeddings
A__ : int = hidden_size
A__ : List[Any] = num_layers
A__ : Any = num_heads
A__ : List[str] = intermediate_size
A__ : Dict = window_size
A__ : Optional[int] = activation_function
A__ : Optional[int] = resid_dropout
A__ : List[str] = embed_dropout
A__ : str = attention_dropout
A__ : List[str] = classifier_dropout
A__ : str = layer_norm_epsilon
A__ : str = initializer_range
A__ : Any = use_cache
A__ : List[str] = bos_token_id
A__ : Any = eos_token_id
A__ : Optional[Any] = attention_types
A__ : List[Any] = self.expand_attention_types_params(snake_case_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
@staticmethod
def lowerCamelCase ( snake_case_ ):
'''simple docstring'''
A__ : Optional[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
import torch
A__ : Union[str, Any] = input.size()
A__ : str = len(lowerCAmelCase )
A__ : Union[str, Any] = shape[dimension]
A__ : Dict = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
A__ : Dict = torch.div(sizedim - size , lowerCAmelCase , rounding_mode="""floor""" ) + 1
A__ : str = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
A__ : str = [slice(lowerCAmelCase )] * rank
A__ : Optional[Any] = indices
A__ : Tuple = input[s]
A__ : List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
def _A( lowerCAmelCase , lowerCAmelCase ):
import torch
A__ : Optional[Any] = torch.arange(1 , lowerCAmelCase )
A__ : int = torch.remainder(lowerCAmelCase , lowerCAmelCase )
A__ : str = remainders == 0
A__ : Tuple = candidates[divisor_indices]
A__ : Any = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode="""floor""" )
class __UpperCAmelCase (__A ):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
A__ : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A__ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._config.num_heads
def lowerCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
'''simple docstring'''
A__ : Optional[int] = super(snake_case_ , self ).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
# We need to order the input in the way they appears in the forward()
A__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A__ , A__ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A__ : Any = seqlen + 2
A__ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : List[Any] = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(self.num_layers )
]
A__ : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
A__ : List[Any] = ordered_inputs["""attention_mask"""].dtype
A__ : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 13
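# Quick illustration of the attention_types expansion implemented above
# (a sketch; GPTNeoConfig is the upstream transformers class this obfuscated
# block corresponds to):
#
#   from transformers import GPTNeoConfig
#   config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
#   assert config.attention_layers == ["global", "local", "global", "local"]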
| 363 | 0 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
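# Example invocation (file name and paths are placeholders):
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions \
#       --gold_data_path eval.gold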
| 222 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to FP16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
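# Example invocation via python-fire (file name is a placeholder):
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin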
| 222 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("DownBlock2D", "AttnDownBlock2D") ,up_block_types=("AttnUpBlock2D", "UpBlock2D") ,)
return model
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.dummy_uncond_unet
_lowerCamelCase : Optional[Any] = ScoreSdeVeScheduler()
_lowerCamelCase : str = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
sde_ve.to(__lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = sde_ve(num_inference_steps=2 ,output_type="numpy" ,generator=__lowerCAmelCase ).images
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : int = sde_ve(num_inference_steps=2 ,output_type="numpy" ,generator=__lowerCAmelCase ,return_dict=__lowerCAmelCase )[
0
]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCamelCase : List[str] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "google/ncsnpp-church-256"
_lowerCamelCase : str = UNetaDModel.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
sde_ve.to(__lowerCAmelCase )
sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : Tuple = sde_ve(num_inference_steps=10 ,output_type="numpy" ,generator=__lowerCAmelCase ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 46 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 540 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->Any:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] )->Any:
return max(metric_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for gt in ground_truths )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] )->str:
_lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , '''r''' ).readlines()]
_lowerCAmelCase = []
if args.gold_data_mode == "qa":
_lowerCAmelCase = pd.read_csv(_SCREAMING_SNAKE_CASE , sep='''\t''' , header=_SCREAMING_SNAKE_CASE )
for answer_list in data[1]:
_lowerCAmelCase = ast.literal_eval(_SCREAMING_SNAKE_CASE )
answers.append(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , '''r''' ).readlines()]
_lowerCAmelCase = [[reference] for reference in references]
_lowerCAmelCase = _lowerCAmelCase = _lowerCAmelCase = 0
for prediction, ground_truths in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
total += 1
em += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
fa += metric_max_over_ground_truths(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = 100.0 * em / total
_lowerCAmelCase = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict )->Tuple:
_lowerCAmelCase = args.k
_lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , '''r''' ).readlines()]
_lowerCAmelCase = [line.strip() for line in open(_SCREAMING_SNAKE_CASE , '''r''' ).readlines()]
_lowerCAmelCase = _lowerCAmelCase = 0
for hypo, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = set(hypo.split('''\t''' )[:k] )
_lowerCAmelCase = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCAmelCase = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any )->List[str]:
def strip_title(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if title.startswith('''"''' ):
_lowerCAmelCase = title[1:]
if title.endswith('''"''' ):
_lowerCAmelCase = title[:-1]
return title
_lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , )['''input_ids'''].to(args.device )
_lowerCAmelCase = rag_model.rag.question_encoder(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = question_enc_outputs[0]
_lowerCAmelCase = rag_model.retriever(
_SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
_lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCAmelCase = []
for docs in all_docs:
_lowerCAmelCase = [strip_title(_SCREAMING_SNAKE_CASE ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_SCREAMING_SNAKE_CASE ) )
return provenance_strings
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
with torch.no_grad():
_lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = inputs_dict.input_ids.to(args.device )
_lowerCAmelCase = inputs_dict.attention_mask.to(args.device )
_lowerCAmelCase = rag_model.generate( # rag_model overwrites generate
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
if args.print_predictions:
for q, a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info('''Q: {} - A: {}'''.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return answers
def UpperCAmelCase__ ( )->str:
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_SCREAMING_SNAKE_CASE , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_SCREAMING_SNAKE_CASE , choices=['''exact''', '''compressed''', '''legacy'''] , type=_SCREAMING_SNAKE_CASE , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_SCREAMING_SNAKE_CASE , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_SCREAMING_SNAKE_CASE , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_SCREAMING_SNAKE_CASE , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_SCREAMING_SNAKE_CASE , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_SCREAMING_SNAKE_CASE , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_SCREAMING_SNAKE_CASE , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_SCREAMING_SNAKE_CASE , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_SCREAMING_SNAKE_CASE , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=5_0 , type=_SCREAMING_SNAKE_CASE , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] )->Dict:
_lowerCAmelCase = {}
if args.model_type is None:
_lowerCAmelCase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
_lowerCAmelCase = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
_lowerCAmelCase = args.n_docs
if args.index_name is not None:
_lowerCAmelCase = args.index_name
if args.index_path is not None:
_lowerCAmelCase = args.index_path
else:
_lowerCAmelCase = BartForConditionalGeneration
_lowerCAmelCase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _SCREAMING_SNAKE_CASE )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_SCREAMING_SNAKE_CASE ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
_lowerCAmelCase = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , retriever=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.retriever.init_retrieval()
else:
_lowerCAmelCase = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
_lowerCAmelCase = []
for line in tqdm(_SCREAMING_SNAKE_CASE ):
questions.append(line.strip() )
if len(_SCREAMING_SNAKE_CASE ) == args.eval_batch_size:
_lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) + '''\n''' )
preds_file.flush()
_lowerCAmelCase = []
if len(_SCREAMING_SNAKE_CASE ) > 0:
_lowerCAmelCase = evaluate_batch_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
preds_file.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
preds_file.flush()
score_fn(_SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
def naive_pattern_search(s: str, pattern: str) -> list:
    '''Return every index in s at which pattern starts.'''
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
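# The search above runs in O((n - m + 1) * m) for text length n and pattern
# length m; the call above prints [4, 10, 18], the three start indices of
# "ABC" in the text.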
| 205 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    '''Build a GHZ-style circuit on the given number of qubits and return measurement counts.'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
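# For the default 1_000 shots, the GHZ-style circuit above should split its
# counts roughly evenly between the all-zeros and all-ones states, e.g.
# approximately {'000': 510, '111': 490} for quantum_entanglement(3).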
| 205 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = botoa.client('''iam''' )
snake_case_ = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__UpperCAmelCase, AssumeRolePolicyDocument=json.dumps(__UpperCAmelCase, indent=2 ) )
snake_case_ = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__UpperCAmelCase, PolicyName=F"{role_name}_policy_permission", PolicyDocument=json.dumps(__UpperCAmelCase, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"role {role_name} already exists. Using existing one" )
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
snake_case_ = botoa.client('''iam''' )
return iam_client.get_role(RoleName=__UpperCAmelCase )["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with "
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or is it OK to break the model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type do you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += " [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 593 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        # Register a new feature extractor class for a given config class.
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
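# A minimal usage sketch (checkpoint name is illustrative; assumes the
# checkpoint ships a preprocessor config on the Hub):
#
#   from transformers import AutoFeatureExtractor
#
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   print(type(feature_extractor).__name__)  # -> Wav2Vec2FeatureExtractor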
| 593 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
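# Worked example for the test case above (all processes arrive at t=0):
# the scheduler runs the bursts in the order 2, 3, 5, 7, so
#   waiting_time     = [0, 5, 2, 10]  (mean 4.25)
#   turn_around_time = [2, 10, 5, 17] (mean 8.50)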
| 425 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the remainder over the first groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs gen_kwargs, sharding the data source lists."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the data source lists of a list of gen_kwargs back into a single gen_kwargs."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the data source lists; lists of the same size get the same shuffling so they stay aligned."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
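# A minimal usage sketch (values are illustrative):
#
#   gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "metadata": ("v1",)}
#   assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 3
#   jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
#   # -> [{"files": ["a.txt", "b.txt"], ...}, {"files": ["c.txt"], ...}]
#   assert _merge_gen_kwargs(jobs) == gen_kwargs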
| 195 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 720 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
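# A minimal usage sketch (dataset name is illustrative):
#
#   import datasets
#
#   ds = datasets.load_dataset("imdb", split="train")
#   print(ds[0])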
| 616 | 0 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string_value):
    try:
        int(string_value)
        return True
    except ValueError:
        return False


def can_convert_to_float(string_value):
    try:
        float(string_value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
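# A minimal invocation sketch (file names are illustrative; the csv is expected
# to have columns model, batch_size, sequence_length, result, as produced by
# the transformers benchmark scripts):
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png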
| 259 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task, using the accelerator to distribute the work."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
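# A minimal launch sketch (checkpoint and argument values are illustrative; the
# available flags are defined in arguments.HumanEvalArguments):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1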
| 259 | 1 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status
def _snake_case ( lowercase__ : List[Any] ) -> List[Any]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowerCAmelCase_ :Any = list(range(2 , n + 1 ) )
lowerCAmelCase_ :int = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__UpperCamelCase ) ):
for j in range(i + 1 , len(__UpperCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowerCAmelCase_ :int = 0
# filters actual prime numbers.
lowerCAmelCase_ :Optional[int] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
lowerCAmelCase_ :Tuple = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__UpperCamelCase ):
ans.append(__UpperCamelCase )
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0"
lowerCAmelCase_ :int = [] # this list will be returns of the function.
# potential prime number factors.
lowerCAmelCase_ :Tuple = 2
lowerCAmelCase_ :Optional[int] = number
if number == 0 or number == 1:
ans.append(__UpperCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__UpperCamelCase ):
while quotient != 1:
if is_prime(__UpperCamelCase ) and (quotient % factor == 0):
ans.append(__UpperCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(__UpperCamelCase )
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "'ans' must been from type list"
return ans
def _snake_case ( lowercase__ : int ) -> Dict:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ :Any = 0
# prime factorization of 'number'
lowerCAmelCase_ :Optional[int] = prime_factorization(__UpperCamelCase )
lowerCAmelCase_ :str = max(__UpperCamelCase )
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "'ans' must been from type int"
return ans
def _snake_case ( lowercase__ : Any ) -> Any:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowerCAmelCase_ :Union[str, Any] = 0
# prime factorization of 'number'
lowerCAmelCase_ :Optional[Any] = prime_factorization(__UpperCamelCase )
lowerCAmelCase_ :Optional[int] = min(__UpperCamelCase )
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "'ans' must been from type int"
return ans
def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def _snake_case ( lowercase__ : Any ) -> int:
'''simple docstring'''
assert (
isinstance(__UpperCamelCase , __UpperCamelCase ) and (number > 2) and is_even(__UpperCamelCase )
), "'number' must been an int, even and > 2"
lowerCAmelCase_ :Dict = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowerCAmelCase_ :List[str] = get_prime_numbers(__UpperCamelCase )
lowerCAmelCase_ :int = len(__UpperCamelCase )
# run variable for while-loops.
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :List[str] = None
# exit variable. for break up the loops
lowerCAmelCase_ :Any = True
while i < len_pn and loop:
lowerCAmelCase_ :str = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowerCAmelCase_ :str = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and (len(__UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[Any]:
'''simple docstring'''
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ :int = 0
while numbera != 0:
lowerCAmelCase_ :Dict = numbera % numbera
lowerCAmelCase_ :List[str] = numbera
lowerCAmelCase_ :int = rest
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any ) -> Dict:
'''simple docstring'''
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowerCAmelCase_ :Any = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowerCAmelCase_ :List[str] = prime_factorization(__UpperCamelCase )
lowerCAmelCase_ :Dict = prime_factorization(__UpperCamelCase )
elif numbera == 1 or numbera == 1:
lowerCAmelCase_ :Any = []
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[int] = max(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :List[str] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowerCAmelCase_ :Union[str, Any] = prime_fac_a.count(__UpperCamelCase )
lowerCAmelCase_ :List[str] = prime_fac_a.count(__UpperCamelCase )
for _ in range(max(__UpperCamelCase , __UpperCamelCase ) ):
ans *= n
else:
lowerCAmelCase_ :Any = prime_fac_a.count(__UpperCamelCase )
for _ in range(__UpperCamelCase ):
ans *= n
done.append(__UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowerCAmelCase_ :List[Any] = prime_fac_a.count(__UpperCamelCase )
for _ in range(__UpperCamelCase ):
ans *= n
done.append(__UpperCamelCase )
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def _snake_case ( lowercase__ : List[Any] ) -> str:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n >= 0), "'number' must been a positive int"
lowerCAmelCase_ :Tuple = 0
lowerCAmelCase_ :int = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__UpperCamelCase ):
ans += 1
# precondition
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and is_prime(
__UpperCamelCase ), "'ans' must been a prime number and from type int"
return ans
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
assert (
is_prime(__UpperCamelCase ) and is_prime(__UpperCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowerCAmelCase_ :List[Any] = p_number_a + 1 # jump to the next number
lowerCAmelCase_ :Any = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__UpperCamelCase ):
number += 1
while number < p_number_a:
ans.append(__UpperCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(__UpperCamelCase ):
number += 1
# precondition
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and ans[0] != p_number_a
and ans[len(__UpperCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _snake_case ( lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1"
lowerCAmelCase_ :str = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__UpperCamelCase )
# precondition
assert ans[0] == 1 and ans[len(__UpperCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def _snake_case ( lowercase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowerCAmelCase_ :List[str] = get_divisors(__UpperCamelCase )
# precondition
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and (divisors[0] == 1)
and (divisors[len(__UpperCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
    # sum of all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _snake_case ( lowercase__ : Tuple , lowercase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowerCAmelCase_ :Union[str, Any] = gcd(abs(__UpperCamelCase ) , abs(__UpperCamelCase ) )
# precondition
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _snake_case ( lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowerCAmelCase_ :Tuple = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :Optional[int] = 1
lowerCAmelCase_ :List[Any] = 1 # this will be return
for _ in range(n - 1 ):
lowerCAmelCase_ :str = ans
ans += fiba
lowerCAmelCase_ :List[Any] = tmp
return ans
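# A few spot checks, using the conventional names from the original library
# (values verified by hand):
#
#   is_prime(13)             -> True
#   prime_factorization(287) -> [7, 41]
#   goldbach(16)             -> [3, 13]
#   kg_v(8, 10)              -> 40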
| 711 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
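# A minimal invocation sketch (the test path is illustrative; slow integration
# tests only run with RUN_SLOW=1):
#
#   RUN_SLOW=1 python -m pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k "xsum"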
| 256 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
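# A minimal invocation sketch (the test is skipped unless a TPU environment is
# detected by @require_tpu):
#
#   python -m pytest tests/test_tpu.py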
| 60 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
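# A minimal usage sketch (values are illustrative):
#
#   config = DownloadConfig(cache_dir="~/.cache/huggingface", max_retries=3)
#   config_copy = config.copy()  # deep-copies every field
#   assert config_copy.max_retries == 3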
| 198 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seqaseq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__a) , 10_00)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="google/reformer-crime-and-punishment", revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a", padding=False, sequences=sequences, )
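# --- Illustrative sketch (not part of the original tests) ---
# The round trip the assertions above exercise, in plain form:
#
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tok.encode("Hello World!")   # [126, 32, 262, 152, 38, 72, 287] per the easy-symbols test
# print(tok.decode(ids))             # decodes back, modulo whitespace normalization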
| 78 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_a = F"""down_blocks.{i}.resnets.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_a = F"""down_blocks.{i}.attentions.{j}."""
_a = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_a = F"""up_blocks.{i}.resnets.{j}."""
_a = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_a = F"""up_blocks.{i}.attentions.{j}."""
_a = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_a = F"""down_blocks.{i}.downsamplers.0.conv."""
_a = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_a = """mid_block.attentions.0."""
_a = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_a = F"""mid_block.resnets.{j}."""
_a = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
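# Toy illustration of the remapping above (hypothetical key and tensor): a
# Diffusers key is rewritten piecewise, first the resnet part names
# ("norm1" -> "in_layers.0"), then the block prefix
# ("down_blocks.0.resnets.0." -> "input_blocks.1.0.").
#
# demo = {"down_blocks.0.resnets.0.norm1.weight": torch.zeros(4)}
# assert "input_blocks.1.0.in_layers.0.weight" in convert_unet_state_dict(demo)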
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_a = F"""encoder.down_blocks.{i}.resnets.{j}."""
_a = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_a = F"""down_blocks.{i}.downsamplers.0."""
_a = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_a = F"""up_blocks.{i}.upsamplers.0."""
_a = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_a = F"""decoder.up_blocks.{i}.resnets.{j}."""
_a = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_a = F"""mid_block.resnets.{i}."""
_a = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    return w.reshape(*w.shape, 1, 1)
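# Why the reshape: Diffusers stores these attention projections as Linear
# weights of shape (C, C), while the original stable-diffusion checkpoint
# expects 1x1 Conv2d weights of shape (C, C, 1, 1). Quick check:
#
# w = torch.randn(512, 512)
# assert reshape_weight_for_sd(w).shape == (512, 512, 1, 1)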
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F'''mid.attn_1.{weight_name}.weight''' in k:
                print(F'''Reshaping {k} for SD format''')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
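# Shape sketch of the q/k/v fusion above (hidden size 1024 chosen only for
# illustration): three (1024, 1024) projections are concatenated along dim 0
# into one (3072, 1024) in_proj_weight, matching OpenCLIP's fused attention.
#
# q, k, v = (torch.randn(1024, 1024) for _ in range(3))
# assert torch.cat([q, k, v]).shape == (3072, 1024)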
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
    parser.add_argument(
        """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
        torch.save(state_dict, args.checkpoint_path)
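# Example invocation (script and path names are placeholders):
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.ckpt --half
# With --use_safetensors the merged state dict is written via safetensors
# instead of torch.save.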
| 78 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
def fetch_github_info(auth_token) -> dict[Any, Any]:
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.') | 322 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    # Target keys follow DistilBERT's state-dict layout.
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F"""distilbert.embeddings.{w}.weight"""] = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
    for w in ["weight", "bias"]:
        compressed_sd[F"""distilbert.embeddings.LayerNorm.{w}"""] = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
            ]
            compressed_sd[F"""distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"""] = state_dict[
                F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["vocab_projector.bias"] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F"""vocab_transform.{w}"""] = state_dict[F"""cls.predictions.transform.dense.{w}"""]
            compressed_sd[F"""vocab_layer_norm.{w}"""] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
    print(F"""N layers selected for distillation: {std_idx}""")
    print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
    print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint) | 322 | 1 |
from __future__ import annotations
def fractional_knapsack(value, weight, capacity):
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
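    # Worked example: values (60, 100, 120), weights (10, 20, 30), capacity 50.
    # Ratios are 6, 5 and 4, so items 0 and 1 are taken whole plus 2/3 of
    # item 2, giving 60 + 100 + 80 = 240.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]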
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
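# Behavior sketch (illustrative): the assignment above swaps this module in
# sys.modules for a _LazyModule, so e.g.
#   from transformers.models.mobilebert import MobileBertModel
# only imports the heavy torch modeling file at that first attribute access.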
| 462 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""")
    def _measure_memory(self, func):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = 'N/A'
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""")
                return "N/A", None
| 48 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return F"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
| 28 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
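# --- Illustrative usage sketch (not part of the original file) ---
# tok = RobertaTokenizerFast.from_pretrained("roberta-base")
# ids = tok("Hello world")["input_ids"]
# # The <s> ... </s> framing comes from build_inputs_with_special_tokens above:
# assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id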
| 656 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 656 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCAmelCase__ = spark.range(100 ).repartition(1 )
UpperCAmelCase__ = Spark(a__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCAmelCase__ = spark.range(10 ).repartition(2 )
UpperCAmelCase__ = [1, 0]
UpperCAmelCase__ = _generate_iterable_examples(a__, a__ ) # Reverse the partitions.
UpperCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(a__, a__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCAmelCase__ , UpperCAmelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 486 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
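# Minimal usage sketch (illustrative; the checkpoint id below is hypothetical):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("some-org/altclip-checkpoint")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt", padding=True)
#   # inputs now holds input_ids, attention_mask and pixel_values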
| 627 | 0 |
"""simple docstring"""
import operator as op
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = []
A_ : List[str] = lambda snake_case__ , snake_case__ : int(x / y ) # noqa: E731 integer division operation
A_ : Tuple = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(UpperCamelCase__ )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(UpperCamelCase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ )
else:
A_ : int = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ )
A_ : Tuple = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ )
stack.append(
str(opr[x](int(UpperCamelCase__ ) , int(UpperCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ , )
return int(stack[0] )
if __name__ == "__main__":
_lowerCAmelCase = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 715 |
"""simple docstring"""
_lowerCAmelCase = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_lowerCAmelCase = ["a", "b", "c", "d", "e"]
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : int = start
# add current to visited
visited.append(snake_case__ )
A_ : Any = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
A_ : Optional[Any] = topological_sort(snake_case__ , snake_case__ , snake_case__ )
# if all neighbors visited add current to sort
sort.append(snake_case__ )
# if all vertices haven't been visited select a new one to visit
if len(snake_case__ ) != len(snake_case__ ):
for vertice in vertices:
if vertice not in visited:
A_ : Optional[Any] = topological_sort(snake_case__ , snake_case__ , snake_case__ )
# return sort
return sort
if __name__ == "__main__":
_lowerCAmelCase = topological_sort("a", [], [])
print(sort)
| 480 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 587 |
def check_cycle(graph: dict) -> bool:
    # keep track of all the visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
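# Quick usage sketch (illustrative):
#
#   acyclic = {0: [1, 2], 1: [2], 2: []}
#   cyclic = {0: [1], 1: [2], 2: [0]}
#   assert check_cycle(acyclic) is False
#   assert check_cycle(cyclic) is True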
| 587 | 1 |
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 627 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
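# Round-trip sketch (illustrative): the two conversions above are inverses for
# canonical numerals, e.g.
#   roman_to_int("MMXXIV") == 2024
#   int_to_roman(2024) == "MMXXIV"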
| 627 | 1 |
"""simple docstring"""
class a :
def __init__( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = name
_UpperCAmelCase = value
_UpperCAmelCase = weight
def __repr__( self : str ):
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.value
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.name
def lowerCAmelCase_ ( self : List[Any] ):
return self.weight
def lowerCAmelCase_ ( self : List[str] ):
return self.value / self.weight
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(len(lowercase ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = sorted(lowercase ,key=lowercase ,reverse=lowercase )
_UpperCAmelCase = []
_UpperCAmelCase , _UpperCAmelCase = 0.0, 0.0
for i in range(len(lowercase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
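# Usage sketch (illustrative values): pick the highest-value items that fit
# under a 15-unit weight budget.
#
#   foods = ["burger", "salad", "soda"]
#   values = [80, 60, 20]
#   weights = [40, 10, 5]
#   chosen, total = greedy(build_menu(foods, values, weights), 15, Things.get_value)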
| 277 | """simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar coordinates into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check if a system is in static equilibrium (net moment approximately zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 277 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 332 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, ignoring padded positions
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
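# The forward pass above mean-pools token embeddings under the attention mask.
# Equivalent standalone sketch (illustrative shapes):
#
#   embs = torch.randn(2, 5, 1024)                         # (batch, seq, hidden)
#   mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
#   pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
#   # pooled has shape (2, 1024); padded positions contribute nothing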
| 332 | 1 |
def euclidean_gcd(a, b):
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5)}''')
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3)}''')
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3)}''')
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6)}''')
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3)}''')
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5)}''')
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3)}''')
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3)}''')
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6)}''')
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3)}''')
if __name__ == "__main__":
main()
| 198 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for download behavior (mirrors `datasets.DownloadConfig`)."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 198 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ :str = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 705 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
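# Quick sanity sketch (illustrative, not part of the module): the cosine
# schedule produces betas that grow toward the end of the trajectory, since
# alpha_bar decays faster at large t:
#
#   betas = betas_for_alpha_bar(10)
#   assert betas[0] < betas[-1]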
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler implementing Heun's method (a second-order sampler)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: bool = False,
        clip_sample: bool = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
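# Minimal denoising-loop sketch for the scheduler above (illustrative only;
# `model` stands in for any noise-prediction network with this call shape):
#
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:  # timesteps are duplicated for the 2nd-order step
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)  # hypothetical call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample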
| 441 | 0 |
"""Open the top Google search results for the given query in a browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 107 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree for primes p < q."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
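# Worked form of the bound used above (illustrative): a prime pair (p, q) is
# counted when q*log2(p) + p*log2(q) <= degree*log2(base), which is the
# inequality p**q * q**p <= base**degree compared in log space so the huge
# integers never have to be materialized.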
| 268 | 0 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : str ) -> list:
lowerCamelCase_ : Union[str, Any] =[0] * len(lowerCamelCase__ )
for i in range(1 , len(lowerCamelCase__ ) ):
# use last results for better performance - dynamic programming
lowerCamelCase_ : List[str] =prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowerCamelCase_ : List[str] =prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowerCamelCase_ : str =j
return prefix_result
def _snake_case ( lowerCamelCase__ : str ) -> int:
return max(prefix_function(lowerCamelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
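# Illustrative values for the prefix function above:
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4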
| 244 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) -> str:
def loss_fn(lowerCamelCase__ : Optional[int] ):
lowerCamelCase_ : Any =model_inputs.pop("start_labels" )
lowerCamelCase_ : Dict =model_inputs.pop("end_labels" )
lowerCamelCase_ : Union[str, Any] =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : Optional[Any] =state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple =outputs
return state.loss_fn(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , )
lowerCamelCase_ , lowerCamelCase_ : Any =jax.random.split(lowerCamelCase__ )
lowerCamelCase_ : str =jax.value_and_grad(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =grad_fn(state.params )
lowerCamelCase_ : int =jax.lax.pmean({"loss": loss} , axis_name="batch" )
lowerCamelCase_ : Optional[Any] =jax.lax.pmean(lowerCamelCase__ , "batch" )
lowerCamelCase_ : str =state.apply_gradients(grads=lowerCamelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def _snake_case ( lowerCamelCase__ : int , **lowerCamelCase__ : str ) -> List[str]:
lowerCamelCase_ : List[str] =model_inputs.pop("start_labels" )
lowerCamelCase_ : str =model_inputs.pop("end_labels" )
lowerCamelCase_ : int =model_inputs.pop("pooled_labels" )
lowerCamelCase_ : List[Any] =state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] =outputs
lowerCamelCase_ : List[Any] =state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class lowercase__ ( train_state.TrainState ):
_UpperCAmelCase :Callable = struct.field(pytree_node=snake_case__ )
@dataclass
class lowercase__ :
_UpperCAmelCase :Args
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :Callable
_UpperCAmelCase :wandb
_UpperCAmelCase :Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 244 | 1 |
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)

if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 648 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    # move the randomly chosen pivot value to the end of the slice
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    # new_pivot_index tracks the boundary of elements smaller than the pivot
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
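# Driver below sorts 100 samples drawn from a standard normal distribution and
# reports the number of comparisons (the count returned by the sort).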
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
| 315 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
        key = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''' , '''flava.image_model''' )
        key = key.replace('''text_encoder.module''' , '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
        key = key.replace('''text_projection''' , '''flava.text_projection''' )
        key = key.replace('''image_projection''' , '''flava.image_projection''' )

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    # Copy/paste/tweak the original FLAVA weights into the transformers design.
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='''cpu''')
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='''cpu''')

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
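    # Example invocation (script name and paths below are placeholders, not from the original):
    #   python convert_flava_checkpoint.py --checkpoint_path flava.pt \
    #       --codebook_path codebook.pt --pytorch_dump_folder_path ./flava-hf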
| 561 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('''sagemaker_mpi_enabled''', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('''smdistributed''') is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
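    # Initialize the model-parallel runtime once at import time, before any smp.* query below.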
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '''
            '''`TrainingArguments` instead.''',
            FutureWarning,
        )
@cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('''PyTorch: setting up devices''')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                '''torch.distributed process group is initialized, but local_rank == -1. '''
                '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''')
        if self.no_cuda:
            device = torch.device('''cpu''')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('''cuda''', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='''smddp''', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK'''))
            device = torch.device('''cuda''', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='''nccl''', timeout=self.ddp_timeout_delta)
            device = torch.device('''cuda''', self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 561 | 1 |
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
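# Illustrative example (value computed from the formula above; trailing decimals depend on float rounding):
# equated_monthly_installments(25_000, 0.12, 3) is approximately 830.36 per month across 36 payments.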
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires a forward to exist
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 675 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
    '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_maskformer'''] = [
        '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MaskFormerForInstanceSegmentation''',
        '''MaskFormerModel''',
        '''MaskFormerPreTrainedModel''',
    ]
    _import_structure['''modeling_maskformer_swin'''] = [
        '''MaskFormerSwinBackbone''',
        '''MaskFormerSwinModel''',
        '''MaskFormerSwinPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 712 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level encodings here, so skip.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                input_ids = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(input_ids))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(input_ids)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', from_slow=True)
        text = '''A photo of a cat'''
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained('''test_opt''')

        tokenizer = AutoTokenizer.from_pretrained('''./test_opt''')
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', use_slow=True)
        text = '''A photo of a cat'''
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained('''facebook/opt-350m''', from_slow=True)
        tokenizer.bos_token = '''bos'''
        tokenizer.bos_token_id = tokenizer.get_vocab()['''bos''']

        text = '''A photo of a cat'''
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained('''./tok''')
        tokenizer = AutoTokenizer.from_pretrained('''./tok''')
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 598 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_( _lowerCamelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
_lowerCamelCase : list[int]
_lowerCamelCase : list[str]
_lowerCamelCase : str
_lowerCamelCase : str
_lowerCamelCase : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding="utf-8" )
_lowerCamelCase : Optional[int] = [int(_lowerCamelCase ) for number in data.strip().split("," )]
_lowerCamelCase : List[Any] = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
_lowerCamelCase : Union[str, Any] = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
_lowerCamelCase : List[str] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 46 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND outputs 1 unless both inputs are 1, i.e. output is 1 if any input is 0.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 173 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt'
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'])
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt'
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf'
        )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'])
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 714 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
    '''tokenizer_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}

SPIECE_UNDERLINE = '''▁'''
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 327 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search returning the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
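# Visits each vertex and edge at most once, so it runs in O(V + E); `reversed`
# keeps the visiting order identical to the recursive formulation.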
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(depth_first_search(G, 'A'))
| 86 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '''
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`.")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
                f'''device: {str(jax.devices()[0])}.''')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table: pa.Table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table: pa.Table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 86 | 1 |
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
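# Note: merge() pops from the front of Python lists, which costs O(n) per pop;
# swapping in collections.deque would keep each merge pass linear.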
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
| 608 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
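# Applied to the default all-zeros register, the QFT produces a uniform
# superposition, so the 10000 shots spread roughly evenly over all 2**n bitstrings.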
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 608 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore( TestCasePlus ):
    """simple docstring"""
    @require_torch_neuroncore
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = f'''--nproc_per_node=2
        --master_port={get_torch_dist_unique_port()}
        {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'''--output_dir {output_dir}'''.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed( TestCasePlus ):
    """simple docstring"""
    @require_torch_multi_gpu
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = f'''--nproc_per_node={torch.cuda.device_count()}
        --master_port={get_torch_dist_unique_port()}
        {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'''--output_dir {output_dir}'''.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
    dataset = DummyDataset(dataset_length)
    def compute_metrics(p: EvalPrediction) -> Dict:
        '''simple docstring'''
        sequential = list(range(len(dataset ) ) )
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
        if not success and training_args.local_rank == 0:
            logger.warning(
                "Predictions and/or labels do not match expected results:\n  - predictions: "
                f'''{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}''' )
        return {"success": success}
    trainer = Trainer(
        model=DummyModel(),
        args=training_args,
        data_collator=DummyDataCollator(),
        eval_dataset=dataset,
        compute_metrics=compute_metrics,
    )
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)
    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)
    trainer.args.eval_accumulation_steps = None
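    # eval_accumulation_steps moves predictions to the host every N steps;
    # the results must match the default all-at-once evaluation path above.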
| 606 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        """simple docstring"""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="pt" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(R"<.*?>" , "" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
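    # Donut emits the answer as XML-like tags; token2json turns the decoded
    # tag stream into a dict, from which only the "answer" field is kept.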
| 606 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model( self ):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 708 |
'''simple docstring'''
def prefix_function( input_string : str ):
    """simple docstring"""
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_string : str ):
    """simple docstring"""
    return max(prefix_function(input_string ) )
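# Example: prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]; each
# entry is the length of the longest proper prefix that is also a suffix of
# the string up to that index (the KMP failure function).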
if __name__ == "__main__":
import doctest
doctest.testmod()
| 564 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest( TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        # BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        '''simple docstring'''
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        '''simple docstring'''
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        '''simple docstring'''
        save_dir = os.path.join(self.tmpdirname , """rag_tokenizer""" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        '''simple docstring'''
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        '''simple docstring'''
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 379 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''donut-swin'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
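        # e.g. with the default embed_dim=96 and 4 stages: 96 * 2**3 = 768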
| 379 | 1 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
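    # Typical invocation (paths illustrative):
    #   --gpt2_checkpoint_path ./ckpt --pytorch_dump_folder_path ./out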
| 288 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288 | 1 |
import re
def UpperCAmelCase__( dna : str ):
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )
    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
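# Example: the strand "GCTA" maps to "CGAT" (A<->T, C<->G).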
if __name__ == "__main__":
import doctest
doctest.testmod()
| 576 | import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size , seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list , out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
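    # seqeval scores entities, not tokens: precision/recall/F1 are computed
    # over the complete labeled spans produced by align_predictions.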
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions , label_ids , metrics = trainer.predict(test_dataset )
        preds_list , _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , 'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , 'w' ) as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , 'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , 'w' ) as writer:
                with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 576 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
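        # With out_features=None the backbone defaults to the last stage only,
        # hence the single feature map and single channel entry checked above.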
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="""Bit does not output attentions""" )
    def test_attention_outputs( self ):
        pass
    @unittest.skip(reason="""Bit does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="""Bit does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""preactivation""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip(reason="""Bit does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ):
        pass
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class BitBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = BitModelTester(self )
| 703 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps = 2_000 , snr = 0.15 , sigma_min = 0.01 , sigma_max = 1_348.0 , sampling_eps = 1e-5 , correct_steps = 1 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )
    def set_sigmas( self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self , timesteps , t ):
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self , model_output , timestep , sample , generator = None , return_dict = True , ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )
    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__( self ):
        return self.config.num_train_timesteps
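    # step_pred is the reverse-SDE predictor and step_correct the Langevin
    # corrector of the predictor-corrector sampler (Song et al., 2021),
    # adapted here from score_sde_pytorch (see the header disclaimer).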
| 393 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
    """tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_deberta_fast"""] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deberta"""] = [
        """DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DebertaForMaskedLM""",
        """DebertaForQuestionAnswering""",
        """DebertaForSequenceClassification""",
        """DebertaForTokenClassification""",
        """DebertaModel""",
        """DebertaPreTrainedModel""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deberta"""] = [
        """TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFDebertaForMaskedLM""",
        """TFDebertaForQuestionAnswering""",
        """TFDebertaForSequenceClassification""",
        """TFDebertaForTokenClassification""",
        """TFDebertaModel""",
        """TFDebertaPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 347 |
def manhattan_distance( point_a : list , point_b : list ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list[float] ):
    '''simple docstring'''
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        'Expected a list of numbers as input, found '
                        F"{type(item ).__name__}"
                    )
                    raise TypeError(msg )
        else:
            msg = F"Expected a list of numbers as input, found {type(point ).__name__}"
            raise TypeError(msg )
    else:
        raise ValueError('Missing an input' )
def manhattan_distance_one_liner( point_a : list , point_b : list ):
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
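# Example: manhattan_distance([1, 1], [9, 9]) == 16.0, i.e. |1-9| + |1-9|.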
if __name__ == "__main__":
import doctest
doctest.testmod()
| 406 | 0 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
        """en""": """Machine learning is great, isn't it?""",
        """ru""": """Машинное обучение - это здорово, не так ли?""",
        """de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        """wmt16-en-de-dist-12-1""": [28.3, 27.52],
        """wmt16-en-de-dist-6-1""": [27.4, 27.11],
        """wmt16-en-de-12-1""": [26.9, 25.75],
    }
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(f'''Generating {path}''' )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name) | 128 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover( graph : dict ):
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") | 128 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
def __init__( self : Any , lowercase : int , lowercase : int , lowercase : int , lowercase : Union[str, Any]=0.0 , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = False , lowercase : bool = True , lowercase : str = "layer_norm" , lowercase : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case = only_cross_attention
_snake_case = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
_snake_case = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_snake_case = AdaLayerNorm(lowercase , lowercase )
elif self.use_ada_layer_norm_zero:
_snake_case = AdaLayerNormZero(lowercase , lowercase )
else:
_snake_case = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
_snake_case = Attention(
query_dim=lowercase , heads=lowercase , dim_head=lowercase , dropout=lowercase , bias=lowercase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=lowercase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_snake_case = (
AdaLayerNorm(lowercase , lowercase )
if self.use_ada_layer_norm
else nn.LayerNorm(lowercase , elementwise_affine=lowercase )
)
_snake_case = Attention(
query_dim=lowercase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=lowercase , dim_head=lowercase , dropout=lowercase , bias=lowercase , upcast_attention=lowercase , ) # is self-attn if encoder_hidden_states is none
else:
_snake_case = None
_snake_case = None
# 3. Feed-forward
_snake_case = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
_snake_case = FeedForward(lowercase , dropout=lowercase , activation_fn=lowercase , final_dropout=lowercase )
# let chunk size default to None
_snake_case = None
_snake_case = 0
def A ( self : int , lowercase : Optional[int] , lowercase : int ):
'''simple docstring'''
_snake_case = chunk_size
_snake_case = dim
def A ( self : Any , lowercase : torch.FloatTensor , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.FloatTensor] = None , lowercase : Optional[torch.LongTensor] = None , lowercase : Dict[str, Any] = None , lowercase : Optional[torch.LongTensor] = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
_snake_case = self.norma(lowercase , lowercase )
elif self.use_ada_layer_norm_zero:
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.norma(
lowercase , lowercase , lowercase , hidden_dtype=hidden_states.dtype )
else:
_snake_case = self.norma(lowercase )
_snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_snake_case = self.attna(
lowercase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=lowercase , **lowercase , )
if self.use_ada_layer_norm_zero:
_snake_case = gate_msa.unsqueeze(1 ) * attn_output
_snake_case = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_snake_case = (
self.norma(lowercase , lowercase ) if self.use_ada_layer_norm else self.norma(lowercase )
)
_snake_case = self.attna(
lowercase , encoder_hidden_states=lowercase , attention_mask=lowercase , **lowercase , )
_snake_case = attn_output + hidden_states
# 3. Feed-forward
_snake_case = self.norma(lowercase )
if self.use_ada_layer_norm_zero:
_snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
_snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_snake_case = torch.cat(
[self.ff(lowercase ) for hid_slice in norm_hidden_states.chunk(lowercase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_snake_case = self.ff(lowercase )
if self.use_ada_layer_norm_zero:
_snake_case = gate_mlp.unsqueeze(1 ) * ff_output
_snake_case = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : Optional[int] = None , lowercase : int = 4 , lowercase : float = 0.0 , lowercase : str = "geglu" , lowercase : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case = int(dim * mult )
_snake_case = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_snake_case = GELU(lowercase , lowercase )
if activation_fn == "gelu-approximate":
_snake_case = GELU(lowercase , lowercase , approximate='tanh' )
elif activation_fn == "geglu":
_snake_case = GEGLU(lowercase , lowercase )
elif activation_fn == "geglu-approximate":
_snake_case = ApproximateGELU(lowercase , lowercase )
_snake_case = nn.ModuleList([] )
# project in
self.net.append(lowercase )
# project dropout
self.net.append(nn.Dropout(lowercase ) )
# project out
self.net.append(nn.Linear(lowercase , lowercase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(lowercase ) )
def A ( self : Optional[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
for module in self.net:
_snake_case = module(lowercase )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int , lowercase : int , lowercase : str = "none" ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Linear(lowercase , lowercase )
_snake_case = approximate
def A ( self : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowercase , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def A ( self : List[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.proj(lowercase )
_snake_case = self.gelu(lowercase )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : int , lowercase : int ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Linear(lowercase , dim_out * 2 )
def A ( self : str , lowercase : int ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(lowercase )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def A ( self : List[Any] , lowercase : Any ):
'''simple docstring'''
_snake_case , _snake_case = self.proj(lowercase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(lowercase )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : List[str] , lowercase : int , lowercase : int ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Linear(lowercase , lowercase )
def A ( self : Tuple , lowercase : Dict ):
'''simple docstring'''
_snake_case = self.proj(lowercase )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : List[Any] , lowercase : List[str] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Embedding(lowercase , lowercase )
_snake_case = nn.SiLU()
_snake_case = nn.Linear(lowercase , embedding_dim * 2 )
_snake_case = nn.LayerNorm(lowercase , elementwise_affine=lowercase )
def A ( self : str , lowercase : Optional[Any] , lowercase : str ):
'''simple docstring'''
_snake_case = self.linear(self.silu(self.emb(lowercase ) ) )
_snake_case , _snake_case = torch.chunk(lowercase , 2 )
_snake_case = self.norm(lowercase ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : Any , lowercase : str ):
'''simple docstring'''
super().__init__()
_snake_case = CombinedTimestepLabelEmbeddings(lowercase , lowercase )
_snake_case = nn.SiLU()
_snake_case = nn.Linear(lowercase , 6 * embedding_dim , bias=lowercase )
_snake_case = nn.LayerNorm(lowercase , elementwise_affine=lowercase , eps=1E-6 )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any , lowercase : Tuple , lowercase : Dict=None ):
'''simple docstring'''
_snake_case = self.linear(self.silu(self.emb(lowercase , lowercase , hidden_dtype=lowercase ) ) )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = emb.chunk(6 , dim=1 )
_snake_case = self.norm(lowercase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : Tuple , lowercase : int , lowercase : int , lowercase : int , lowercase : Optional[str] = None , lowercase : float = 1E-5 ):
'''simple docstring'''
super().__init__()
_snake_case = num_groups
_snake_case = eps
if act_fn is None:
_snake_case = None
else:
_snake_case = get_activation(lowercase )
_snake_case = nn.Linear(lowercase , out_dim * 2 )
def A ( self : Optional[Any] , lowercase : Tuple , lowercase : List[Any] ):
'''simple docstring'''
if self.act:
_snake_case = self.act(lowercase )
_snake_case = self.linear(lowercase )
_snake_case = emb[:, :, None, None]
_snake_case , _snake_case = emb.chunk(2 , dim=1 )
_snake_case = F.group_norm(lowercase , self.num_groups , eps=self.eps )
_snake_case = x * (1 + scale) + shift
        return x
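
The `_chunk_size` branch in `BasicTransformerBlock` splits the feed-forward over the sequence axis purely to cap peak memory; the result is unchanged because the MLP acts position-wise. A minimal equivalence sketch in plain PyTorch:

```python
import torch

ff = torch.nn.Linear(16, 16)  # stand-in for the position-wise feed-forward
x = torch.randn(2, 8, 16)     # (batch, sequence, dim)

full = ff(x)
chunked = torch.cat([ff(chunk) for chunk in x.chunk(4, dim=1)], dim=1)
assert torch.allclose(full, chunked, atol=1e-6)
```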
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
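
A quick check of the helpers above on toy arrays; the expected values are worked out by hand:

```python
import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(simple_accuracy(preds, labels))  # 0.75
print(acc_and_f1(preds, labels))       # {'accuracy': 0.75, 'f1': 0.666...} (precision 0.5, recall 1.0)
```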
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
UpperCamelCase__ = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , _A , )
UpperCamelCase__ = None
if credentials_configuration == 0:
UpperCamelCase__ = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
UpperCamelCase__ = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
UpperCamelCase__ = _ask_field("""AWS Access Key ID: """ )
UpperCamelCase__ = aws_access_key_id
UpperCamelCase__ = _ask_field("""AWS Secret Access Key: """ )
UpperCamelCase__ = aws_secret_access_key
UpperCamelCase__ = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
UpperCamelCase__ = aws_region
UpperCamelCase__ = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , _A , )
if role_management == 0:
UpperCamelCase__ = _ask_field("""Enter your IAM role name: """ )
else:
UpperCamelCase__ = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(_A )
UpperCamelCase__ = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
UpperCamelCase__ = None
if is_custom_docker_image:
UpperCamelCase__ = _ask_field("""Enter your Docker image: """ , lambda _A : str(_A ).lower() )
UpperCamelCase__ = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
UpperCamelCase__ = None
if is_sagemaker_inputs_enabled:
UpperCamelCase__ = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda _A : str(_A ).lower() , )
UpperCamelCase__ = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
UpperCamelCase__ = None
if is_sagemaker_metrics_enabled:
UpperCamelCase__ = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda _A : str(_A ).lower() , )
UpperCamelCase__ = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
UpperCamelCase__ = {}
UpperCamelCase__ = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
if use_dynamo:
UpperCamelCase__ = """dynamo_"""
UpperCamelCase__ = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
UpperCamelCase__ = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
if use_custom_options:
UpperCamelCase__ = _ask_options(
"""Which mode do you want to use?""" , _A , lambda _A : TORCH_DYNAMO_MODES[int(_A )] , default="""default""" , )
UpperCamelCase__ = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
UpperCamelCase__ = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=_A , error_message="""Please enter yes or no.""" , )
UpperCamelCase__ = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
UpperCamelCase__ = _ask_options(
_A , _A , lambda _A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_A )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
UpperCamelCase__ = _ask_field(_A , lambda _A : str(_A ).lower() , default="""ml.p3.2xlarge""" )
UpperCamelCase__ = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
UpperCamelCase__ = _ask_field(
"""How many machines do you want use? [1]: """ , _A , default=1 , )
UpperCamelCase__ = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=_A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_A , use_cpu=_A , dynamo_config=_A , eca_instance_type=_A , profile=_A , region=_A , iam_role_name=_A , mixed_precision=_A , num_machines=_A , sagemaker_inputs_file=_A , sagemaker_metrics_file=_A , )
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).'
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
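
For intuition, a worked example of the token-level F1 above. `normalize_answer` strips articles, so "the" does not count as a token:

```python
# gold "the cat sat" -> {cat, sat}; pred "cat sat down" -> {cat, sat, down}
# precision = 2/3, recall = 2/2 = 1.0, f1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8
print(compute_f1("the cat sat", "cat sat down"))  # ~0.8
```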
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score"
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score"
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)"
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
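
The metric functions can also be used directly, without files; a minimal sketch on toy data:

```python
dataset = [{"paragraphs": [{"qas": [
    {"id": "q1", "answers": {"text": ["Paris"]}},
    {"id": "q2", "answers": {"text": []}},  # unanswerable
]}]}]
preds = {"q1": "paris", "q2": ""}

exact_raw, f1_raw = get_raw_scores(dataset, preds)
print(make_eval_dict(exact_raw, f1_raw))  # OrderedDict([('exact', 100.0), ('f1', 100.0), ('total', 2)])
```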
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
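
Once exported, the decoder can be checked with ONNX Runtime. A hedged sketch (`onnxruntime` is assumed to be installed, 4 latent channels is the usual Stable Diffusion value, and the boolean `return_dict` input is folded away as a constant during tracing):

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("output/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = sess.run(["sample"], {"latent_sample": latent})
print(sample.shape)  # e.g. (1, 3, 200, 200) for an 8x upsampling decoder
```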
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsProcessorList(list):
    """Holds a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects and applies them in sequence."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len: int, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase ) -> str:
'''simple docstring'''
A__ = bos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_A , new_scores.at[:, self.bos_token_id].set(0 ) , _A )
return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = max_length
A__ = eos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_A , new_scores.at[:, self.eos_token_id].set(0 ) , _A )
return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
if not isinstance(_A , _A ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(_A , _A ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
A__ = min_length
A__ = eos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_A , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _A )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
A__ = list(_A )
A__ = begin_index
def __call__( self , lowercase , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_A , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _A )
return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase ) -> str:
'''simple docstring'''
A__ = list(_A )
def __call__( self , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
A__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase ) -> Any:
'''simple docstring'''
A__ = dict(_A )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_A )
A__ = jnp.intaa(_A )
def __call__( self , lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
def _force_token(lowercase ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_A , dtype=scores.dtype ) * -float("inf" )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_A , _A , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_A ) , lambda: scores , ) , )
return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_A , "max_initial_timestamp_index" ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self , lowercase , lowercase , lowercase ) -> List[str]:
'''simple docstring'''
A__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowercase , lowercase ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _A , _A )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _A , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _A , _A )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _A , _A , )
return jnp.where(
_A , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _A , )
A__ = jax.vmap(_A )(_A , _A )
A__ = jnp.where(cur_len == self.begin_index , _A , _A )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _A , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_A , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _A , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_A , axis=-1 )
def handle_cumulative_probs(lowercase , lowercase ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _A , )
A__ = jax.vmap(_A )(_A , _A )
return scores
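
A sketch of composing the warpers above, following the upstream `transformers` Flax API:

```python
import jax.numpy as jnp

warpers = FlaxLogitsProcessorList(
    [
        FlaxTemperatureLogitsWarper(0.7),
        FlaxTopKLogitsWarper(top_k=2),
        FlaxTopPLogitsWarper(top_p=0.9),
    ]
)

input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
scores = jnp.log(jnp.array([[0.5, 0.3, 0.15, 0.05]]))
warped = warpers(input_ids, scores, 1)  # cur_len = 1
print(warped)  # everything but the two highest-scoring tokens is filtered to -inf
```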
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value: the empty string has exactly one (empty) decomposition
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
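
When only the number of decompositions is needed, the same recurrence collapses to a count. A memoized sketch (`count_construct` is a helper introduced here, not part of the original module):

```python
from functools import lru_cache


def count_construct(target: str, word_bank: list[str]) -> int:
    @lru_cache(maxsize=None)
    def go(rest: str) -> int:
        if not rest:
            return 1
        return sum(go(rest[len(word):]) for word in word_bank if rest.startswith(word))

    return go(target)


print(count_construct("purple", ["purp", "p", "ur", "le", "purpl"]))  # 2
```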
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / c, validating the input speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to a four-vector; defaults to the symbolic (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
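
A hedged wiring sketch for training; the tokenizer choice and the data path are assumptions, not part of this module:

```python
from transformers import AutoTokenizer
from torch.utils.data import DataLoader

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # any tokenizer with .encode() works
labels = get_mmimdb_labels()
train_set = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=128)
train_loader = DataLoader(train_set, batch_size=4, shuffle=True, collate_fn=collate_fn)
```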
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Any = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = '''openai-gpt'''
SCREAMING_SNAKE_CASE : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=4_04_78 , n_positions=5_12 , n_embd=7_68 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
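# Sketch of how the attribute_map above behaves under the standard
# PretrainedConfig aliasing (the class name used here is illustrative for this
# excerpt):
# config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
# assert config.hidden_size == 768 and config.num_hidden_layers == 12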
| 514 | 0 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self : List[str] ) -> None:
'''simple docstring'''
a__ : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
a__ : List[Any] = Vector()
def UpperCAmelCase ( self : Optional[Any] ) -> None:
'''simple docstring'''
a__ : List[str] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(a_ ) , "(0,0,0,0,0,1)" )
def UpperCAmelCase ( self : Optional[Any] ) -> None:
'''simple docstring'''
a__ : List[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(a_ ) , 4 )
def UpperCAmelCase ( self : int ) -> None:
'''simple docstring'''
a__ : Any = Vector([1, 2] )
a__ : Optional[int] = Vector([1, 2, 3, 4, 5] )
a__ : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
a__ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCAmelCase ( self : Any ) -> None:
'''simple docstring'''
a__ : int = Vector([1, 2, 3] )
a__ : Optional[int] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase ( self : int ) -> None:
'''simple docstring'''
a__ : Tuple = Vector([1, 2, 3] )
a__ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase ( self : List[Any] ) -> None:
'''simple docstring'''
a__ : Tuple = Vector([1, 2, 3] )
a__ : Dict = Vector([2, -1, 4] ) # for test of dot product
a__ : List[Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase ( self : List[Any] ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def UpperCAmelCase ( self : List[Any] ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def UpperCAmelCase ( self : Dict ) -> None:
'''simple docstring'''
a__ : Union[str, Any] = Vector([1, 2, 3] )
a__ : Tuple = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , a_ , a_ ) ) , "(3,4,7)" )
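        # axpy is the classic BLAS level-1 operation a*x + y; the assertion
        # above checks 2 * (1, 2, 3) + (1, 0, 1) == (3, 4, 7).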
def UpperCAmelCase ( self : str ) -> None:
'''simple docstring'''
a__ : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
a__ : Any = x.copy()
self.assertEqual(str(a_ ) , str(a_ ) )
def UpperCAmelCase ( self : Any ) -> None:
'''simple docstring'''
a__ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(a_ ) , "(0,1,0)" )
def UpperCAmelCase ( self : List[str] ) -> None:
'''simple docstring'''
a__ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(a_ ) )
def UpperCAmelCase ( self : List[str] ) -> None:
'''simple docstring'''
a__ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(a_ , a_ ) )
def UpperCAmelCase ( self : Tuple ) -> None:
'''simple docstring'''
a__ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(a_ , a_ ) )
def UpperCAmelCase ( self : Any ) -> None:
'''simple docstring'''
a__ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
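        # Worked expansion along the first row: 1*(4*8 - 5*7) - 2*(2*8 - 5*6)
        # + 3*(2*7 - 4*6) = -3 + 28 - 30 = -5, matching the assertion above.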
def UpperCAmelCase ( self : int ) -> None:
'''simple docstring'''
a__ : Tuple = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
a__ : Union[str, Any] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def UpperCAmelCase ( self : str ) -> None:
'''simple docstring'''
a__ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(a_ ) )
def UpperCAmelCase ( self : Optional[Any] ) -> None:
'''simple docstring'''
a__ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCAmelCase ( self : Optional[Any] ) -> None:
'''simple docstring'''
a__ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def UpperCAmelCase ( self : Optional[int] ) -> None:
'''simple docstring'''
a__ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a__ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def UpperCAmelCase ( self : Tuple ) -> None:
'''simple docstring'''
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    unittest.main()
 | 642 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems( config , items ):
'''simple docstring'''
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
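# Usage note: after this hook, `pytest -m unit` selects every test that was not
# explicitly marked `integration`.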
def pytest_configure( config ):
'''simple docstring'''
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ):
    '''simple docstring'''
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="session" )
def lowercase__ ( ) -> Union[str, Any]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ):
    '''simple docstring'''
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ):
    '''simple docstring'''
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , True )
 | 642 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = BertTokenizer
UpperCamelCase__ = BertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = filter_non_english
def lowercase__ ( self : Tuple )->Tuple:
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any] )->Optional[int]:
_UpperCAmelCase = '''UNwant\u00E9d,running'''
_UpperCAmelCase = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__UpperCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowercase__ ( self : List[str] )->int:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = '''UNwant\u00E9d,running'''
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# With lower casing
_UpperCAmelCase = self.get_tokenizer(do_lower_case=__UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase )
_UpperCAmelCase = '''UNwant\u00E9d,running'''
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple )->Optional[Any]:
_UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowercase__ ( self : Any )->Any:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase__ ( self : Optional[int] )->Any:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowercase__ ( self : Optional[int] )->Dict:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase__ ( self : Union[str, Any] )->Dict:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowercase__ ( self : Optional[Any] )->str:
_UpperCAmelCase = BasicTokenizer()
_UpperCAmelCase = '''a\n\'ll !!to?\'d of, can\'t.'''
_UpperCAmelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__UpperCamelCase ) , __UpperCamelCase )
def lowercase__ ( self : int )->str:
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowercase__ ( self : List[str] )->str:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowercase__ ( self : Optional[int] )->Dict:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowercase__ ( self : Dict )->int:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__UpperCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def lowercase__ ( self : Union[str, Any] )->int:
_UpperCAmelCase = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
_UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def lowercase__ ( self : List[Any] )->int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
_UpperCAmelCase = tokenizer_r.encode_plus(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase , )
_UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase , '''do_lower_case''' ) else False
_UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = ['''的''', '''人''', '''有''']
_UpperCAmelCase = ''''''.join(__UpperCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = False
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(__UpperCamelCase )
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
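# A minimal greedy longest-match sketch of the WordPiece algorithm exercised in
# the tests above (illustrative only; the real tokenizer also caps token length
# and handles special tokens):
def _wordpiece_sketch(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # try the longest remaining substring first
            sub = ("##" if start > 0 else "") + word[start:end]
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:  # no piece matched -> the whole word is unknown
            return [unk]
        pieces.append(cur)
        start = end
    return pieces
# _wordpiece_sketch("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]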
| 95 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
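# Worked example of the count above (hypothetical model): torch.nn.Linear(10, 2)
# has 10 * 2 weights + 2 biases = 22 trainable parameters.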
__A : Dict = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
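# Design note: the monitoring mode is derived from the metric name, so any
# metric containing "loss" is minimised while score-style metrics (rouge2, bleu,
# em) are maximised.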
class _a ( pl.Callback):
"""simple docstring"""
    def on_batch_end( self , trainer , pl_module )->Tuple:
        lrs = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True )->None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase = od / '''test_results.txt'''
_UpperCAmelCase = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
_UpperCAmelCase = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__UpperCamelCase )
generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
        with open(results_file , '''a+''' ) as writer:
            for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )
@rank_zero_only
    def on_train_start( self , trainer , pl_module )->Union[str, Any]:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule )->List[Any]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , '''test''' )
@rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module )->Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 95 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence: list[int | str] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
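# Cross-check against the standard library: the backtracking above emits the
# same 4! = 24 orderings for the first sequence as itertools does.
# from itertools import permutations
# assert len(list(permutations([3, 1, 2, 4]))) == 24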
| 292 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
lowerCAmelCase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
lowerCAmelCase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself  This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
lowerCAmelCase = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'Number of resulting singleton clusters in the key '
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively' )
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
_A = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'conll_score': conll} )
return output_scores
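# Note on the aggregate above: the CoNLL score is the plain average of the MUC,
# B-cubed and CEAFe F1 values; e.g. F1s of 0.70, 0.80 and 0.90 give
# (0.70 + 0.80 + 0.90) / 3 * 100 = 80.0.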
def check_gold_parse_annotation( key_lines ):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase ( self )-> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def UpperCamelCase ( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False )-> Any:
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 292 | 1 |
from typing import Any
import numpy as np
def is_hermitian( matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
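# Rayleigh quotient recap: R(a, v) = (v* a v) / (v* v). For the second matrix
# in tests() below, v = (1, 2, 3) gives v* a v = 42 and v* v = 14, hence 3.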
def tests() -> None:
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'{a} is not hermitian.'
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'{a} is not hermitian.'
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 719 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles( path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case_ ( TestCasePlus ):
    def run_eval_tester( self , model ):
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            run_generate()
        assert Path(output_file_name ).exists()
        # os.remove(Path(output_file_name))
    def __A ( self ):
        self.run_eval_tester(T5_TINY )  # assumption: the unparameterized smoke test exercises the tiny T5 model
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def __A ( self , model ):
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def __A ( self , model ):
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / 'scores.json' )
        reference_path = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name , text['en'] )
        _dump_articles(reference_path , text['de'] )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'\n run_eval_search.py\n {model}\n {str(input_file_name )}\n {str(output_file_name )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys , 'argv' , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
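# Note on the grid syntax above: "num_beams=1:2 length_penalty=0.9:1.0" expands
# to the cross product of the colon-separated candidate values, i.e. four runs:
# (1, 0.9), (1, 1.0), (2, 0.9) and (2, 1.0).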
| 311 | 0 |
"""simple docstring"""
_snake_case = "Input must be a string of 8 numbers plus letter"
_snake_case = "TRWAGMYFPDXBNJZSQVHLCKE"
def snake_case ( _a: str )-> bool:
'''simple docstring'''
if not isinstance(_a , _a ):
lowerCamelCase__ = F'Expected string as input, found {type(_a ).__name__}'
raise TypeError(_a )
lowerCamelCase__ = spanish_id.replace('-' , '' ).upper()
if len(_a ) != 9:
raise ValueError(_a )
try:
lowerCamelCase__ = int(spanish_id_clean[0:8] )
lowerCamelCase__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_a ) from ex
if letter.isdigit():
raise ValueError(_a )
return letter == LOOKUP_LETTERS[number % 23]
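# Worked example: for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the validator above accepts "12345678-Z".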
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510 |
"""simple docstring"""
from collections import deque
class Process :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = process_name # process name
lowerCamelCase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowerCamelCase__ = arrival_time
lowerCamelCase__ = burst_time # remaining burst time
lowerCamelCase__ = 0 # total time of the process wait in ready queue
lowerCamelCase__ = 0 # time from arrival time to completion time
class MLFQ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : deque[Process] , SCREAMING_SNAKE_CASE__ : int , ):
# total number of mlfq's queues
lowerCamelCase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowerCamelCase__ = time_slices
# unfinished process is in this ready_queue
lowerCamelCase__ = queue
# current time
lowerCamelCase__ = current_time
# finished process is in this sequence queue
lowerCamelCase__ = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue: list[Process] ):
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue: list[Process] ):
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue: list[Process] ):
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue: deque[Process] ):
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process: Process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue: deque[Process] ):
        finished = deque() # sequence deque of finished processes
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft() # current process
            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp )
# FCFS will finish all remaining processes
return finished
    def round_robin( self , ready_queue: deque[Process] , time_slice: int ):
        finished = deque() # sequence deque of terminated processes
        # run one cycle only; unfinished processes go back to the queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft() # current process
            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for the time slice only
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process back at the end of the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp )
        self.finish_queue.extend(finished ) # add finished processes to the finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished , self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
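# Scheduling recap for the demo below: every queue except the last runs
# round-robin with its time slice (17 and 25 here), and whatever is still
# unfinished falls through to FCFS in the final queue, so every process
# eventually completes.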
if __name__ == "__main__":
import doctest
_snake_case = Process("P1", 0, 53)
_snake_case = Process("P2", 0, 17)
_snake_case = Process("P3", 0, 68)
_snake_case = Process("P4", 0, 24)
_snake_case = 3
_snake_case = [17, 25]
_snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
_snake_case = Process("P1", 0, 53)
_snake_case = Process("P2", 0, 17)
_snake_case = Process("P3", 0, 68)
_snake_case = Process("P4", 0, 24)
_snake_case = 3
_snake_case = [17, 25]
_snake_case = deque([Pa, Pa, Pa, Pa])
_snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
_snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 510 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a ( metaclass=DummyObject ):
    _lowerCAmelCase = ["""speech"""]
    def __init__( self , *args , **kwargs ) -> Union[str, Any]:
        requires_backends(self , ['speech'] )
class a ( metaclass=DummyObject ):
    _lowerCAmelCase = ["""speech"""]
    def __init__( self , *args , **kwargs ) -> List[str]:
        requires_backends(self , ['speech'] )
| 532 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def __UpperCAmelCase ( self ) -> List[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> int:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def __UpperCAmelCase ( self , config , pixel_values , labels ) -> Union[str, Any]:
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
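        # Shape-check arithmetic for the defaults above: image 32, patch 2 ->
        # (32 // 2) ** 2 = 256 patch tokens; depths [1, 2, 1] merge patches
        # twice, dividing the sequence by 4 ** 2 = 16 -> 16 tokens, while the
        # embed dim doubles twice: 16 * 2 ** 2 = 64.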
    def __UpperCAmelCase ( self , config , pixel_values , labels ) -> str:
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def __UpperCAmelCase ( self , config , pixel_values , labels ) -> Optional[Any]:
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def __UpperCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 532 | 1 |
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
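
# Minimal migration sketch (added; the checkpoint name is illustrative only):
# downstream code should construct the replacement class directly, e.g.
#
#   from transformers import DonutImageProcessor
#   image_processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")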
| 140 |
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE_ = {"input_ids": [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,  # the encoding literal defined above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1_601, 47, 7_647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 140 | 1 |
"""Counting sort: stable, O(n + k) time for n values spanning a range of size k."""


def counting_sort(collection):
    """Sort a list of integers with counting sort and return a new ordered list."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
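
    # Worked example of the counting pass (added): for [4, 1, 3, 1] with coll_min=1,
    # the tallies are [2, 0, 1, 1] and the prefix sums [2, 2, 3, 4]; scanning the
    # input from the right places the later 1 at index 1 and the earlier 1 at
    # index 0, which is what makes the sort stable.
    assert counting_sort([4, 1, 3, 1]) == [1, 1, 3, 4]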
| 717 |
"""Implementation of an XNOR logic gate: the output is 1 only when both inputs match."""


def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    test_xnor_gate()
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 68 | 0 |
"""simple docstring"""
def __a ( A ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(A , A ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(A )]
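
# Worked values (added comment): with range(length) starting at n = 0, the first
# five entries are 0*(-1)=0, 1*1=1, 2*3=6, 3*5=15 and 4*7=28, so
# hexagonal_numbers(5) == [0, 1, 6, 15, 28].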
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 337 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
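

# Note (added): LineByLineTextDataset keeps one example per input line, while
# TextDataset concatenates the whole corpus and slices it into contiguous
# block_size chunks, so --line_by_line suits sentence-per-line corpora and the
# default suits long free-flowing text.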
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 337 | 1 |
"""Compute the average absolute deviation of a list of numbers."""


def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
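
    # Worked example (added): for [1, 2, 3, 4] the mean is 2.5 and the absolute
    # deviations are [1.5, 0.5, 0.5, 1.5], so the average absolute deviation is 1.0.
    assert average_absolute_deviation([1, 2, 3, 4]) == 1.0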
| 411 |
"""Segmented sieve of Eratosthenes: finds all primes up to n one segment at a time."""

import math


def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
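
# Sanity check (added, not in the original): the primes below 30. Each segment
# only holds high - low + 1 booleans, i.e. O(sqrt(n)) memory instead of O(n).
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]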
| 411 | 1 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
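

# Illustration of the refinenet index flip above (added comment): the checkpoint
# numbers its refinenet blocks in the opposite order to DPT's fusion stage, so
# layers.{abs(layer_idx - 4)} maps
#   scratch.refinenet4 -> neck.fusion_stage.layers.0
#   scratch.refinenet1 -> neck.fusion_stage.layers.3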
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 524 |
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
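

# Usage sketch (added): instantiating with the defaults above mirrors the
# bert-generation "large" architecture.
#
#   config = BertGenerationConfig()
#   assert config.model_type == "bert-generation" and config.hidden_size == 1024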
| 524 | 1 |
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
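
    # Deterministic example (added): odd-even transposition sort finishes in at
    # most len(list) passes.
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]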
| 642 |
# Imports
import numpy as np


class IndexCalculation:
    """Calculate vegetation indices from the red, green, blue, red-edge and NIR bands."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
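

if __name__ == "__main__":
    # Added demo, not part of the original module: single-pixel toy band values.
    cl = IndexCalculation(
        red=np.array([50.0]),
        green=np.array([80.0]),
        blue=np.array([30.0]),
        red_edge=np.array([90.0]),
        nir=np.array([120.0]),
    )
    print(cl.calculation("NDVI"))  # (120 - 50) / (120 + 50) ~= 0.4118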
| 642 | 1 |
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 13_33})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase : int = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
UpperCamelCase , UpperCamelCase : str = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCamelCase : List[str] = json.loads(f.read() )
UpperCamelCase : int = {"image_id": 3_97_69, "annotations": target}
# encode them
UpperCamelCase : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
UpperCamelCase : str = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
UpperCamelCase : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
UpperCamelCase : Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
UpperCamelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
UpperCamelCase : int = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase : Union[str, Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
UpperCamelCase : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
UpperCamelCase : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
UpperCamelCase : List[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
UpperCamelCase : int = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCamelCase : Dict = json.loads(f.read() )
UpperCamelCase : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
UpperCamelCase : List[str] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase : Optional[Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
UpperCamelCase : List[str] = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
UpperCamelCase : Optional[int] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
UpperCamelCase : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
UpperCamelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
UpperCamelCase : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase : int = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
UpperCamelCase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
UpperCamelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
UpperCamelCase : Union[str, Any] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
UpperCamelCase : Dict = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
UpperCamelCase : int = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 173 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowerCAmelCase_ = True
from torch.cuda.amp import autocast
lowerCAmelCase_ = logging.getLogger(__name__)
def A__ ( A : str=None , A : Union[str, Any]=None):
'''simple docstring'''
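    # dataclasses forbid mutable defaults, so list-valued arguments are wrapped in a
    # default_factory; HfArgumentParser can then expose them as CLI options with defaults.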
return field(default_factory=lambda: default , metadata=A)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
__SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
__SCREAMING_SNAKE_CASE = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=0.05 , metadata={
'''help''': (
                '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__SCREAMING_SNAKE_CASE = field(
default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''''
} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
__SCREAMING_SNAKE_CASE = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , lowerCamelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
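        # Split each feature into its audio inputs and its label ids so they can be
        # padded separately: audio is padded with the feature extractor's padding value,
        # labels with the tokenizer's pad token (later masked to -100 for the loss).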
UpperCamelCase : Dict = [{"input_values": feature["input_values"]} for feature in features]
UpperCamelCase : str = [{"input_ids": feature["labels"]} for feature in features]
UpperCamelCase : Optional[Any] = self.processor.pad(
lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
UpperCamelCase : Union[str, Any] = self.processor.pad(
labels=lowerCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
UpperCamelCase : int = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
UpperCamelCase : Any = labels
return batch
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> torch.Tensor:
'''simple docstring'''
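        # One optimization step: forward pass (under AMP autocast when enabled), reduce
        # the CTC loss across GPUs, scale for gradient accumulation, then backpropagate
        # through whichever engine is active (native AMP scaler, apex, or DeepSpeed).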
model.train()
UpperCamelCase : List[Any] = self._prepare_inputs(lowerCamelCase )
if self.use_amp:
with autocast():
UpperCamelCase : Union[str, Any] = self.compute_loss(lowerCamelCase , lowerCamelCase )
else:
UpperCamelCase : List[str] = self.compute_loss(lowerCamelCase , lowerCamelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
UpperCamelCase : Optional[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
UpperCamelCase : str = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
UpperCamelCase : List[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCamelCase ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCamelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCamelCase )
else:
loss.backward()
return loss.detach()
def A__ ( ):
'''simple docstring'''
UpperCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCamelCase : Any = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase : Dict = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets:
UpperCamelCase : Union[str, Any] = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name)
UpperCamelCase : Optional[int] = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test")
# Create and save tokenizer
UpperCamelCase : List[str] = F'''[{''.join(data_args.chars_to_ignore)}]'''
def remove_special_characters(A : List[Any]):
        batch["text"] = re.sub(A , "" , batch["sentence"]).lower() + " "
return batch
UpperCamelCase : Any = train_dataset.map(A , remove_columns=["sentence"])
UpperCamelCase : int = eval_dataset.map(A , remove_columns=["sentence"])
def extract_all_chars(A : Union[str, Any]):
UpperCamelCase : Tuple = " ".join(batch["text"])
UpperCamelCase : Optional[Any] = list(set(A))
return {"vocab": [vocab], "all_text": [all_text]}
UpperCamelCase : Tuple = train_dataset.map(
A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=train_dataset.column_names , )
UpperCamelCase : Optional[Any] = train_dataset.map(
A , batched=A , batch_size=-1 , keep_in_memory=A , remove_columns=eval_dataset.column_names , )
UpperCamelCase : Dict = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
UpperCamelCase : Tuple = {v: k for k, v in enumerate(A)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
with open("vocab.json" , "w") as vocab_file:
json.dump(A , A)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase : int = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
UpperCamelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=A , return_attention_mask=A)
UpperCamelCase : int = WavaVecaProcessor(feature_extractor=A , tokenizer=A)
UpperCamelCase : str = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer) , )
if data_args.max_train_samples is not None:
UpperCamelCase : Union[str, Any] = min(len(A) , data_args.max_train_samples)
UpperCamelCase : int = train_dataset.select(range(A))
if data_args.max_val_samples is not None:
UpperCamelCase : Dict = eval_dataset.select(range(data_args.max_val_samples))
UpperCamelCase : Union[str, Any] = torchaudio.transforms.Resample(4_80_00 , 1_60_00)
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(A : Union[str, Any]):
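        # Common Voice ships 48 kHz audio; resample each clip to the 16 kHz rate the model expects.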
UpperCamelCase , UpperCamelCase : List[str] = torchaudio.load(batch["path"])
        batch["speech"] = resampler(A).squeeze().numpy()
        batch["sampling_rate"] = 1_60_00
        batch["target_text"] = batch["text"]
return batch
UpperCamelCase : int = train_dataset.map(
A , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
UpperCamelCase : int = eval_dataset.map(
A , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(A : Dict):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"])) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
UpperCamelCase : Union[str, Any] = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0])
batch.update(A)
return batch
UpperCamelCase : str = train_dataset.map(
A , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , )
UpperCamelCase : Union[str, Any] = eval_dataset.map(
A , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=A , num_proc=data_args.preprocessing_num_workers , )
# Metric
UpperCamelCase : Tuple = datasets.load_metric("wer")
def compute_metrics(A : int):
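        # Greedy CTC decoding: take the argmax token at every frame, restore the pad
        # token where labels were masked with -100, and let the tokenizer collapse
        # repeated tokens before computing the word error rate.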
UpperCamelCase : Union[str, Any] = pred.predictions
UpperCamelCase : Tuple = np.argmax(A , axis=-1)
        pred.label_ids[pred.label_ids == -1_00] = processor.tokenizer.pad_token_id
UpperCamelCase : Union[str, Any] = processor.batch_decode(A)
# we do not want to group tokens when computing the metrics
UpperCamelCase : List[Any] = processor.batch_decode(pred.label_ids , group_tokens=A)
UpperCamelCase : Optional[Any] = wer_metric.compute(predictions=A , references=A)
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
UpperCamelCase : Dict = DataCollatorCTCWithPadding(processor=A , padding=A)
# Initialize our Trainer
UpperCamelCase : int = CTCTrainer(
model=A , data_collator=A , args=A , compute_metrics=A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCamelCase : List[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
UpperCamelCase : Tuple = model_args.model_name_or_path
else:
UpperCamelCase : str = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank):
processor.save_pretrained(training_args.output_dir)
UpperCamelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=A)
trainer.save_model()
UpperCamelCase : int = train_result.metrics
UpperCamelCase : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A)
)
UpperCamelCase : int = min(A , len(A))
trainer.log_metrics("train" , A)
trainer.save_metrics("train" , A)
trainer.save_state()
# Evaluation
UpperCamelCase : int = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
UpperCamelCase : Optional[Any] = trainer.evaluate()
UpperCamelCase : int = data_args.max_val_samples if data_args.max_val_samples is not None else len(A)
UpperCamelCase : Dict = min(A , len(A))
trainer.log_metrics("eval" , A)
trainer.save_metrics("eval" , A)
return results
if __name__ == "__main__":
main()
| 173 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
snake_case__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
snake_case__ = []
snake_case__ = []
snake_case__ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
snake_case__ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'emoji': True,
},
}
]
snake_case__ = 0
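# Each pytest log is JSON-lines: one record per event. Collect the node id and
# duration of every failed test, grouped by the log file it came from.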
for log in Path().glob('*.log'):
snake_case__ = 0
with open(log, 'r') as f:
for line in f:
snake_case__ = json.loads(line)
if line.get('nodeid', '') != "":
snake_case__ = line['nodeid']
if line.get('duration', None) is not None:
snake_case__ = f'''{line["duration"]:.4f}'''
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
snake_case__ = []
log.unlink()
snake_case__ = ''
snake_case__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
snake_case__ = []
snake_case__ = {}
for test in failed_tests:
snake_case__ = test[0].split('::')
            data[0] = data[0].split('/')[-1]
if data[0] not in filesafailed:
                filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
snake_case__ = [test[0] for test in failed_table]
snake_case__ = list(set(files))
# Count number of instances in failed_tests
snake_case__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
snake_case__ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
snake_case__ = 'Too many failed tests, please see the full report in the Action results.'
snake_case__ = len(err) + 10
snake_case__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
snake_case__ = 'No failed tests! 🤗'
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
snake_case__ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
snake_case__ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
snake_case__ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
snake_case__ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
snake_case__ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
snake_case__ = row[0]
else:
                        row[0] = ''
snake_case__ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
) | 638 | import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
assert mmeta["long_pair"] == "heb-eng" | 638 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_snake_case = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: int = "mask2former"
lowerCamelCase__: Optional[Any] = ["swin"]
lowerCamelCase__: Optional[Any] = {"hidden_size": "hidden_dim"}
def __init__( self: str , __lowerCamelCase: Optional[Dict] = None , __lowerCamelCase: int = 2_56 , __lowerCamelCase: int = 2_56 , __lowerCamelCase: int = 2_56 , __lowerCamelCase: int = 10_24 , __lowerCamelCase: str = "relu" , __lowerCamelCase: int = 6 , __lowerCamelCase: int = 10 , __lowerCamelCase: int = 8 , __lowerCamelCase: float = 0.0 , __lowerCamelCase: int = 20_48 , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: int = 4 , __lowerCamelCase: int = 2_55 , __lowerCamelCase: int = 1_00 , __lowerCamelCase: float = 0.1 , __lowerCamelCase: float = 2.0 , __lowerCamelCase: float = 5.0 , __lowerCamelCase: float = 5.0 , __lowerCamelCase: int = 1_25_44 , __lowerCamelCase: float = 3.0 , __lowerCamelCase: float = 0.75 , __lowerCamelCase: float = 0.02 , __lowerCamelCase: float = 1.0 , __lowerCamelCase: bool = True , __lowerCamelCase: List[int] = [4, 8, 16, 32] , __lowerCamelCase: bool = None , **__lowerCamelCase: List[str] , ) -> Optional[int]:
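        # The backbone may be passed as a config object or a plain dict; when omitted,
        # a default Swin backbone configuration is constructed below.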
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
__UpperCAmelCase : List[str] = CONFIG_MAPPING["swin"](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCamelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = backbone_config.pop("model_type" )
__UpperCAmelCase : str = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : Union[str, Any] = config_class.from_dict(__lowerCamelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__UpperCAmelCase : Any = backbone_config
__UpperCAmelCase : Tuple = feature_size
__UpperCAmelCase : Dict = mask_feature_size
__UpperCAmelCase : Union[str, Any] = hidden_dim
__UpperCAmelCase : Union[str, Any] = encoder_feedforward_dim
__UpperCAmelCase : Optional[int] = activation_function
__UpperCAmelCase : int = encoder_layers
__UpperCAmelCase : Any = decoder_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Tuple = dropout
__UpperCAmelCase : Union[str, Any] = dim_feedforward
__UpperCAmelCase : Optional[Any] = pre_norm
__UpperCAmelCase : Dict = enforce_input_projection
__UpperCAmelCase : Tuple = common_stride
__UpperCAmelCase : Optional[int] = ignore_value
__UpperCAmelCase : Optional[int] = num_queries
__UpperCAmelCase : List[str] = no_object_weight
__UpperCAmelCase : Union[str, Any] = class_weight
__UpperCAmelCase : List[Any] = mask_weight
__UpperCAmelCase : Optional[int] = dice_weight
__UpperCAmelCase : Union[str, Any] = train_num_points
__UpperCAmelCase : str = oversample_ratio
__UpperCAmelCase : Union[str, Any] = importance_sample_ratio
__UpperCAmelCase : int = init_std
__UpperCAmelCase : Optional[Any] = init_xavier_std
__UpperCAmelCase : int = use_auxiliary_loss
__UpperCAmelCase : Dict = feature_strides
__UpperCAmelCase : Optional[Any] = output_auxiliary_logits
__UpperCAmelCase : str = decoder_layers
super().__init__(**__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: PretrainedConfig , **__lowerCamelCase: Optional[int] ) -> List[str]:
return cls(
backbone_config=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self: Tuple ) -> Dict[str, any]:
__UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : Union[str, Any] = self.backbone_config.to_dict()
__UpperCAmelCase : Any = self.__class__.model_type
return output
| 382 | from math import asin, atan, cos, radians, sin, sqrt, tan
_snake_case = 6_3_7_8_1_3_7.0
_snake_case = 6_3_5_6_7_5_2.3_1_4_2_4_5
_snake_case = 6378137
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> float:
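    # Haversine distance with a flattening correction: geodetic latitudes are first
    # converted to reduced latitudes using (AXIS_A - AXIS_B) / AXIS_A, then the
    # great-circle distance is computed on a sphere of radius RADIUS (metres).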
__UpperCAmelCase : Any = (AXIS_A - AXIS_B) / AXIS_A
__UpperCAmelCase : Tuple = atan((1 - flattening) * tan(radians(snake_case__ ) ) )
__UpperCAmelCase : Optional[Any] = atan((1 - flattening) * tan(radians(snake_case__ ) ) )
__UpperCAmelCase : List[Any] = radians(snake_case__ )
__UpperCAmelCase : Dict = radians(snake_case__ )
# Equation
__UpperCAmelCase : Tuple = sin((phi_a - phi_a) / 2 )
__UpperCAmelCase : Dict = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCAmelCase : Union[str, Any] = sqrt(sin_sq_phi + (cos(snake_case__ ) * cos(snake_case__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(snake_case__ )
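# Usage sketch (illustrative coordinates; result in metres):
#   _UpperCamelCase(37.774856, -122.424227, 37.864742, -119.537521)  # San Francisco -> Yosemite, roughly 254 km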
if __name__ == "__main__":
import doctest
doctest.testmod()
| 382 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
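        # A flat (exact) faiss index with inner-product scoring: a query of all ones
        # scores highest against the document whose embedding sums to the largest value.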
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary. | 628 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 1 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase = 10_00) -> int:
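    # Project Euler 57: iterate the continued-fraction expansion of sqrt(2), mapping
    # (numerator, denominator) -> (numerator + 2*denominator, numerator + denominator),
    # and count the expansions whose numerator has more digits than the denominator.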
a , a = 1, 1
a = []
for i in range(1 , n + 1):
a = prev_numerator + 2 * prev_denominator
a = prev_numerator + prev_denominator
if len(str(__UpperCamelCase)) > len(str(__UpperCamelCase)):
result.append(__UpperCamelCase)
a = numerator
a = denominator
return len(__UpperCamelCase)
if __name__ == "__main__":
print(F'{solution() = }')
| 515 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = self.get_tokenizer()
a = self.get_feature_extractor()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
processor.save_pretrained(self.tmpdirname )
a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
a = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(A , "include" ):
WavaVecaProcessorWithLM(
tokenizer=A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = floats_list((3, 1000) )
a = feature_extractor(A , return_tensors="np" )
a = processor(A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = "This is a test string"
a = processor(text=A )
a = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = self._get_dummy_logits(shape=(10, 16) , seed=13 )
a = processor.decode(A )
a = decoder.decode_beams(A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def lowerCAmelCase_ ( self , A ) -> Optional[int]:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
a = processor.batch_decode(A )
else:
with get_context(A ).Pool() as pool:
a = processor.batch_decode(A , A )
a = list(A )
with get_context("fork" ).Pool() as p:
a = decoder.decode_beams_batch(A , A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(A , decoded_processor.logit_score )
self.assertListEqual(A , decoded_processor.lm_score )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = self._get_dummy_logits()
a = 15
a = -2_0.0
a = -4.0
a = processor.batch_decode(
A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
a = decoded_processor_out.text
a = list(A )
with get_context("fork" ).Pool() as pool:
a = decoder.decode_beams_batch(
A , A , beam_width=A , beam_prune_logp=A , token_min_logp=A , )
a = [d[0][0] for d in decoded_decoder_out]
a = [d[0][2] for d in decoded_decoder_out]
a = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A , A )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , A )
self.assertTrue(np.array_equal(A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , A , atol=1e-3 ) )
self.assertTrue(np.array_equal(A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , A , atol=1e-3 ) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
a = self._get_dummy_logits()
a = 2.0
a = 5.0
a = -2_0.0
a = True
a = processor.batch_decode(
A , alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
a = decoded_processor_out.text
a = list(A )
decoder.reset_params(
alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , )
with get_context("fork" ).Pool() as pool:
a = decoder.decode_beams_batch(
A , A , )
a = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A , A )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , A )
a = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , A )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = processor.decoder.model_container[processor.decoder._model_key]
a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a = os.listdir(A )
a = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A , A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = snapshot_download("hf-internal-testing/processor_with_lm" )
a = WavaVecaProcessorWithLM.from_pretrained(A )
a = processor.decoder.model_container[processor.decoder._model_key]
a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a = os.listdir(A )
a = os.listdir(A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(A , A )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
a = floats_list((3, 1000) )
a = processor_wavaveca(A , return_tensors="np" )
a = processor_auto(A , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
a = self._get_dummy_logits()
a = processor_wavaveca.batch_decode(A )
a = processor_auto.batch_decode(A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = self._get_dummy_logits()[0]
a = processor.decode(A , output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A , A ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = self._get_dummy_logits()
a = processor.batch_decode(A , output_word_offsets=A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A , A ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(A , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
import torch
a = load_dataset("common_voice" , "en" , split="train" , streaming=A )
a = ds.cast_column("audio" , datasets.Audio(sampling_rate=16000 ) )
a = iter(A )
a = next(A )
a = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
a = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
a = model(A ).logits.cpu().numpy()
a = processor.decode(logits[0] , output_word_offsets=A )
a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
a = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(A , "word" ) ) , A )
self.assertEqual(" ".join(self.get_from_offsets(A , "word" ) ) , output.text )
# output times
a = torch.tensor(self.get_from_offsets(A , "start_time" ) )
a = torch.tensor(self.get_from_offsets(A , "end_time" ) )
# fmt: off
a = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
a = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(A , A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(A , A , atol=0.0_1 ) )
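
# Illustrative sketch of the pooled decoding pattern exercised above. The checkpoint id is
# the one used in the slow test; everything else mirrors calls that appear in this file.
def _example_pooled_batch_decode(logits):
    processor = WavaVecaProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    # the pool must be created *after* the processor so the sub-processes can see the LM
    with get_context("fork").Pool() as pool:
        return processor.batch_decode(logits, pool).text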
| 515 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCamelCase = """bert-base-cased"""
_lowerCamelCase = """google/pegasus-xsum"""
_lowerCamelCase = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
_lowerCamelCase = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
_lowerCamelCase = """patrickvonplaten/t5-tiny-random"""
_lowerCamelCase = """sshleifer/bart-tiny-random"""
_lowerCamelCase = """sshleifer/tiny-mbart"""
_lowerCamelCase = """sshleifer/tiny-marian-en-de"""
def a__ ( _SCREAMING_SNAKE_CASE : Path , _SCREAMING_SNAKE_CASE : list ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[str] = "\n".join(_SCREAMING_SNAKE_CASE )
Path(_SCREAMING_SNAKE_CASE ).open("w" ).writelines(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}.source''' ) , _SCREAMING_SNAKE_CASE )
_dump_articles(os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}.target''' ) , _SCREAMING_SNAKE_CASE )
return tmp_dir
class _snake_case (__SCREAMING_SNAKE_CASE):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
@slow
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case )
UpperCAmelCase_ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Optional[int] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
UpperCAmelCase_ : List[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : Union[str, Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
UpperCAmelCase_ : Tuple = SeqaSeqDataset(
_snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=_snake_case ,max_target_length=_snake_case ,src_lang=_snake_case ,tgt_lang=_snake_case ,)
UpperCAmelCase_ : List[Any] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_snake_case ,_snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ : Union[str, Any] = shift_tokens_right(batch["labels"] ,tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_snake_case )
UpperCAmelCase_ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = LegacySeqaSeqDataset(
_snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=20 ,max_target_length=_snake_case ,)
UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
UpperCAmelCase_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCAmelCase_ : Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines()
UpperCAmelCase_ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_snake_case ,_snake_case ,1_28 ,_snake_case )
UpperCAmelCase_ : Dict = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ : str = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ : List[str] = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case ) < len(_snake_case )
assert len(_snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(_snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="This test requires fairseq" )
def UpperCamelCase__ ( self ):
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ : Dict = self._get_dataset(max_len=64 )
UpperCAmelCase_ : int = 64
UpperCAmelCase_ : str = ds.make_dynamic_sampler(_snake_case ,required_batch_size_multiple=_snake_case )
UpperCAmelCase_ : Dict = [len(_snake_case ) for x in batch_sampler]
assert len(set(_snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_snake_case ) == len(_snake_case ) # no dropped or added examples
UpperCAmelCase_ : Any = DataLoader(_snake_case ,batch_sampler=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 )
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = []
for batch in data_loader:
UpperCAmelCase_ : Any = batch["input_ids"].shape
UpperCAmelCase_ : Optional[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ : int = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_snake_case )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_snake_case )
assert num_src_per_batch[0] == max(_snake_case )
if failures:
raise AssertionError(f'''too many tokens in {len(_snake_case )} batches''' )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self._get_dataset(max_len=5_12 )
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[int] = ds.make_sortish_sampler(_snake_case ,shuffle=_snake_case )
UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 )
UpperCAmelCase_ : Union[str, Any] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.pad_token_id
def count_pad_tokens(_snake_case ,_snake_case="input_ids" ):
return [batch[k].eq(_snake_case ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_snake_case ,k="labels" ) ) < sum(count_pad_tokens(_snake_case ,k="labels" ) )
assert sum(count_pad_tokens(_snake_case ) ) < sum(count_pad_tokens(_snake_case ) )
assert len(_snake_case ) == len(_snake_case )
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_snake_case ,use_fast=_snake_case )
if tok_name == MBART_TINY:
UpperCAmelCase_ : int = SeqaSeqDataset(
_snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,src_lang="EN" ,tgt_lang="FR" ,)
UpperCAmelCase_ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ : int = SeqaSeqDataset(
_snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,)
UpperCAmelCase_ : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_snake_case ) == 1 if tok_name == BART_TINY else len(_snake_case ) == 0
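
# Illustrative sketch of the sortish-sampling idea exercised above (a generic
# re-implementation for reference, not the actual utils.SortishSampler source):
# shuffle indices globally, then sort by source length within fixed-size chunks,
# so each batch is roughly length-homogeneous while epochs still differ.
def sortish_indices(src_lengths, chunk_size, rng):
    # rng is e.g. random.Random(epoch) so every epoch yields a different order
    indices = list(range(len(src_lengths)))
    rng.shuffle(indices)
    chunks = [indices[i : i + chunk_size] for i in range(0, len(indices), chunk_size)]
    return [i for chunk in chunks for i in sorted(chunk, key=lambda j: -src_lengths[j])]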
| 720 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_lowerCamelCase = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
_lowerCamelCase = re.compile(R"""([a-z\d])([A-Z])""")
_lowerCamelCase = re.compile(R"""(?<!_)_(?!_)""")
_lowerCamelCase = re.compile(R"""(_{2,})""")
_lowerCamelCase = R"""^\w+(\.\w+)*$"""
_lowerCamelCase = R"""<>:/\|?*"""
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = _uppercase_uppercase_re.sub(r"\1_\2" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = _lowercase_uppercase_re.sub(r"\1_\2" , _SCREAMING_SNAKE_CASE )
return name.lower()
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = _single_underscore_re.split(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = [_multiple_underscores_re.split(_SCREAMING_SNAKE_CASE ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) if n != "" )
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> Dict:
"""simple docstring"""
if os.path.basename(_SCREAMING_SNAKE_CASE ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
if os.path.basename(_SCREAMING_SNAKE_CASE ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , _SCREAMING_SNAKE_CASE ):
raise ValueError(F'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
return F'''{filename_prefix_for_name(_SCREAMING_SNAKE_CASE )}-{split}'''
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any]=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ : str = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if filetype_suffix:
prefix += F'''.{filetype_suffix}'''
UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return F'''{filepath}*'''
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Dict=None ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[str] = filename_prefix_for_split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if shard_lengths:
UpperCAmelCase_ : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_SCREAMING_SNAKE_CASE )]
if filetype_suffix:
UpperCAmelCase_ : Dict = [filename + F'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
UpperCAmelCase_ : List[str] = prefix
if filetype_suffix:
filename += F'''.{filetype_suffix}'''
return [filename]
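
# Quick usage checks for the helpers above (expected values worked out by hand from the
# regexes; these asserts are illustrative additions, not part of the original module):
assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
assert snakecase_to_camelcase("some_dataset_name") == "SomeDatasetName"
assert filename_prefix_for_split("SQuAD", "train") == "s_qu_ad-train"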
| 323 | 0 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # a few backends are registered under a different pip name
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!" | 57 |
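
# Minimal sketch of the dummy-object mechanism the test above exercises (a simplified
# re-implementation for illustration; the real versions live in diffusers.utils):
class _DummyObjectSketch(type):
    """Metaclass: touching any attribute of the class raises a helpful ImportError."""

    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")


class _FakePipelineSketch(metaclass=_DummyObjectSketch):
    _backends = ["torch", "transformers"]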
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
A_ : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
A_ : int = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def snake_case (UpperCAmelCase__ ) -> str:
UpperCamelCase_: Tuple = {}
with open(UpperCAmelCase__ , 'r' ) as file:
for line_number, line in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: List[Any] = line.strip()
if line:
UpperCamelCase_: List[Any] = line.split()
UpperCamelCase_: Optional[Any] = line_number
UpperCamelCase_: Any = words[0]
UpperCamelCase_: List[Any] = value
return result
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
for attribute in key.split('.' ):
UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase__ ):
UpperCamelCase_: Any = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase_: Dict = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase_: Optional[Any] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: Optional[Any] = hf_pointer
for attribute in hf_param_name.split('.' ):
UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Tuple = shape_pointer.shape
# let's reduce dimension
UpperCamelCase_: int = value[0]
else:
UpperCamelCase_: Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCamelCase_: Optional[int] = value
elif weight_type == "weight_g":
UpperCamelCase_: Any = value
elif weight_type == "weight_v":
UpperCamelCase_: Union[str, Any] = value
elif weight_type == "bias":
UpperCamelCase_: Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
UpperCamelCase_: Dict = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = value
else:
UpperCamelCase_: int = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
UpperCamelCase_: Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase__ ):
UpperCamelCase_: Dict = PARAM_MAPPING[full_name.split('.' )[-1]]
UpperCamelCase_: List[Any] = 'param'
if weight_type is not None and weight_type != "param":
UpperCamelCase_: List[Any] = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCamelCase_: Any = '.'.join([key, hf_param_name] )
else:
UpperCamelCase_: Union[str, Any] = key
UpperCamelCase_: Any = value if 'lm_head' in full_key else value[0]
A_ : str = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None ) -> Any:
UpperCamelCase_: Optional[int] = False
for key, mapped_key in MAPPING.items():
UpperCamelCase_: Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase_: Optional[Any] = True
if "*" in mapped_key:
UpperCamelCase_: Optional[int] = name.split(UpperCAmelCase__ )[0].split('.' )[-2]
UpperCamelCase_: Any = mapped_key.replace('*' , UpperCAmelCase__ )
if "weight_g" in name:
UpperCamelCase_: Union[str, Any] = 'weight_g'
elif "weight_v" in name:
UpperCamelCase_: Dict = 'weight_v'
elif "bias" in name:
UpperCamelCase_: int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_: str = 'weight'
else:
UpperCamelCase_: Union[str, Any] = None
if hf_dict is not None:
rename_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return is_used
return is_used
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
UpperCamelCase_: List[Any] = []
UpperCamelCase_: Dict = fairseq_model.state_dict()
UpperCamelCase_: Optional[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_: Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase_: List[Any] = True
else:
UpperCamelCase_: Tuple = load_wavaveca_layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
UpperCamelCase_: Any = full_name.split('conv_layers.' )[-1]
UpperCamelCase_: int = name.split('.' )
UpperCamelCase_: int = int(items[0] )
UpperCamelCase_: Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase_: Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase_: int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase_: Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase_: List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=False ) -> Dict:
if config_path is not None:
UpperCamelCase_: Tuple = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
else:
UpperCamelCase_: List[str] = WavaVecaConfig()
if is_seq_class:
UpperCamelCase_: int = read_txt_into_dict(UpperCAmelCase__ )
UpperCamelCase_: Tuple = idalabel
UpperCamelCase_: str = WavaVecaForSequenceClassification(UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
feature_extractor.save_pretrained(UpperCAmelCase__ )
elif is_finetuned:
if dict_path:
UpperCamelCase_: List[Any] = Dictionary.load(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase_: Dict = target_dict.pad_index
UpperCamelCase_: Tuple = target_dict.bos_index
UpperCamelCase_: Optional[Any] = target_dict.eos_index
UpperCamelCase_: Union[str, Any] = len(target_dict.symbols )
UpperCamelCase_: int = os.path.join(UpperCAmelCase__ , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
UpperCamelCase_: str = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase_: List[str] = 0
UpperCamelCase_: List[Any] = 1
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = WavaVecaCTCTokenizer(
UpperCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase__ , )
UpperCamelCase_: Any = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase_: Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
UpperCamelCase_: Dict = WavaVecaProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
UpperCamelCase_: Any = WavaVecaForCTC(UpperCAmelCase__ )
else:
UpperCamelCase_: Any = WavaVecaForPreTraining(UpperCAmelCase__ )
if is_finetuned or is_seq_class:
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase_: List[str] = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase_: Any = fairseq.tasks.setup_task(UpperCAmelCase__ )
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase__ )
UpperCamelCase_: str = model[0].eval()
recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
A_ : int = parser.parse_args()
A_ : str = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 57 | 1 |
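
# Generic illustration of the dotted-attribute walk the conversion script above relies on
# when it copies fairseq tensors into the HF module tree (a stand-alone re-implementation
# for reference, not the script's own helper):
def get_by_dotted_path(root, dotted_name):
    """Resolve a path such as 'encoder.layers.0.attention.k_proj' against a module tree."""
    obj = root
    for attr in dotted_name.split("."):
        obj = getattr(obj, attr)
    return obj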
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # compare everything except the names
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size on disk.
    Saves the optimized model next to the input file and returns the new path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # TensorProto data types: 1 = FLOAT and 6 = INT32 (4 bytes each),
                # 7 = INT64 and 11 = DOUBLE (8 bytes each)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
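
# Example invocation of the function above (the input path is a placeholder):
#     optimized_path = remove_dup_initializers("bart_model.onnx")
#     # writes "optimized_bart_model.onnx" next to the input and returns that path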
| 47 | 0 |
UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 590 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simplest and fastest kind of image resizing: each destination pixel copies
    the nearest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
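
# Equivalent vectorized resize (an illustrative alternative to the per-pixel loops above,
# using numpy fancy indexing; not part of the original module):
def nearest_neighbour_vectorized(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]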
| 590 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize an image to ``(size["height"], size["width"])`` with the given resampling filter."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        """Center-crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        """Rescale pixel values by ``scale`` (e.g. 1/255 to map uint8 pixels into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize an image with the given channel-wise mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 709 |
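
# Usage sketch (illustrative; relies only on the defaults defined in __init__ above):
#     processor = ImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) after resize to 256 + center crop to 224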
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) | 57 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase: Dict = 1_6
_lowercase: Tuple = 3_2
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase = 8
else:
_lowerCAmelCase = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
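# Why `pad_to_multiple_of` is 8/16 above: tensor cores are fastest when the padded sequence
# length is a multiple of 8 (fp16/bf16) or 16 (fp8). A minimal sketch of the rounding that
# `tokenizer.pad` performs internally (helper name is ours, for illustration only):
def _round_up_to_multiple(length, multiple):
    # _round_up_to_multiple(13, 8) -> 16 ; _round_up_to_multiple(16, 8) -> 16
    return ((length + multiple - 1) // multiple) * multiple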
def training_function(config, args):
# Initialize accelerator
_lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase = config['lr']
_lowerCAmelCase = int(config['num_epochs'] )
_lowerCAmelCase = int(config['seed'] )
_lowerCAmelCase = int(config['batch_size'] )
_lowerCAmelCase = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
_lowerCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(snake_case )
_lowerCAmelCase , _lowerCAmelCase = get_dataloaders(snake_case , snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase = AdamW(params=model.parameters() , lr=snake_case )
# Instantiate scheduler
_lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.loss
_lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
_lowerCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case )
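# Sanity-check sketch of the accumulation arithmetic used in the loop above (illustrative
# helper, not part of the original script): dividing the loss by `gradient_accumulation_steps`
# and stepping every N batches applies the average gradient of the effective batch.
def _effective_batch_size(per_device_batch_size, accumulation_steps, num_processes=1):
    # e.g. 16 per device * 2 accumulation steps * 1 process -> updates equivalent to batch 32
    return per_device_batch_size * accumulation_steps * num_processes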
def main():
_lowerCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=snake_case , default=snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 192 | from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)


def kth_number(lst, k):
    # Returns the k-th smallest element of `lst` (elements assumed distinct).
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
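    # Example (elements must be distinct, since values equal to the pivot are dropped
    # by the partition above): the third smallest element of [2, 1, 3, 4, 5] is 3.
    print(kth_number([2, 1, 3, 4, 5], 3))  # -> 3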
| 192 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''' , '''mrpc''')
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''')
    def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ = 8
else:
lowerCAmelCase_ = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case) == "1":
lowerCAmelCase_ = 2
# New Code #
lowerCAmelCase_ = int(args.gradient_accumulation_steps)
lowerCAmelCase_ = int(args.local_sgd_steps)
# Initialize accelerator
lowerCAmelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case)
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''')
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['''lr''']
lowerCAmelCase_ = int(config['''num_epochs'''])
lowerCAmelCase_ = int(config['''seed'''])
lowerCAmelCase_ = int(config['''batch_size'''])
lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''')
set_seed(__snake_case)
lowerCAmelCase_ ,lowerCAmelCase_ = get_dataloaders(__snake_case , __snake_case)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ = model.to(accelerator.device)
# Instantiate optimizer
lowerCAmelCase_ = AdamW(params=model.parameters() , lr=__snake_case)
# Instantiate scheduler
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case)
# Now we train the model
for epoch in range(__snake_case):
model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None) as local_sgd:
for step, batch in enumerate(__snake_case):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__snake_case):
lowerCAmelCase_ = model(**__snake_case)
lowerCAmelCase_ = output.loss
accelerator.backward(__snake_case)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__snake_case):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
lowerCAmelCase_ = model(**__snake_case)
lowerCAmelCase_ = outputs.logits.argmax(dim=-1)
lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['''labels''']))
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case)
def main():
lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''')
parser.add_argument(
'''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__snake_case , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=__snake_case , default=8 , help='''Number of local SGD steps or None to disable local SGD''')
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case)
if __name__ == "__main__":
main()
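# Conceptual sketch of what `LocalSGD` does every `local_sgd_steps` optimizer steps: each
# worker trains independently, then parameters are averaged across processes. This is an
# illustration written against raw torch.distributed, not accelerate's actual internals:
#
#     import torch.distributed as dist
#
#     def average_parameters(model):
#         world_size = dist.get_world_size()
#         for param in model.parameters():
#             dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
#             param.data /= world_size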
| 710 | '''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir , '''parameters.json''')
    params = json.loads(open(parameter_file).read())
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''')
if not args.output.endswith('''.pt'''):
        args.output = args.output + '''.pt'''
lowerCAmelCase_ = OrderedDict()
with tf.device('''/CPU:0'''):
lowerCAmelCase_ = tf.train.load_checkpoint(args.tf_model_dir)
lowerCAmelCase_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCAmelCase_ = reader.get_tensor(__snake_case).astype(np.floataa)
if key_name.endswith('''/adam_m''') or key_name.endswith('''/adam_v'''):
continue
if key_name.startswith('''pasts/'''):
if key_name.startswith('''pasts/mlp'''):
lowerCAmelCase_ = int(key_name[9])
elif key_name.startswith('''pasts/out'''):
lowerCAmelCase_ = 8
lowerCAmelCase_ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/moe'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/switch_gating/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/softmlp/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/wo/kernel''') or key_name.endswith('''/wi/kernel'''):
lowerCAmelCase_ = key_name[-9:-7]
for i in range(16):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCAmelCase_ = (
vnp[i].transpose([1, 0]).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/mlp'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/p1/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p1/bias'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p2/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p2/bias'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/ln'''):
lowerCAmelCase_ = int(key_name[8:].split('''/''')[0])
if key_name.endswith('''/b'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/g'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/att'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/qkv/kernel'''):
lowerCAmelCase_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCAmelCase_ = state[:, 0, :, :]
lowerCAmelCase_ = state[:, 1, :, :]
lowerCAmelCase_ = state[:, 2, :, :]
lowerCAmelCase_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/o/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCAmelCase_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/an'''):
lowerCAmelCase_ = int(key_name[8:].split('''/''')[0])
if key_name.endswith('''/b'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/g'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif (
key_name.startswith('''model/wte''')
or key_name.startswith('''model/wpe''')
or key_name.startswith('''model/ete''')
):
lowerCAmelCase_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCAmelCase_ = '''model.%s.weight''' % nlayer
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = torch.tensor(__snake_case)
if key_name.startswith('''model/wte'''):
lowerCAmelCase_ = '''lm_head.weight'''
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/wob'''):
lowerCAmelCase_ = '''final_logits_bias'''
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = state.reshape((1, -1))
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name == "model/dense/kernel":
lowerCAmelCase_ = '''model.last_project.weight'''
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name == "model/dense_1/bias":
lowerCAmelCase_ = '''model.last_project.bias'''
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
torch.save(__snake_case , args.output)
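# Shape sanity check for the attention conversion above (toy sizes, illustrative only):
# a fused TF kernel of shape [hidden, 3, heads, head_dim] splits into q/k/v, and each piece
# is flattened and transposed into a square PyTorch Linear weight.
def _demo_qkv_split():
    hidden, heads, head_dim = 8, 2, 4  # hidden == heads * head_dim
    fused = np.zeros((hidden, 3, heads, head_dim), dtype=np.float32)
    q = fused[:, 0, :, :].reshape(hidden, heads * head_dim).transpose(1, 0)
    assert q.shape == (hidden, hidden)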
if __name__ == "__main__":
A_ : Optional[Any] =argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
A_ : Union[str, Any] =parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 606 | 0 |
def solution(n: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2 , n):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
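# Example: the longest Collatz chain started below one million begins at 837799
# (Project Euler 14), so solution(1000000) returns 837799.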
if __name__ == "__main__":
print(solution(int(input().strip()))) | 360 | import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__a : Tuple = logging.get_logger(__name__)
def get_maskformer_config(model_name):
"""simple docstring"""
__lowercase = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__lowercase = MaskFormerConfig(backbone_config=lowercase )
__lowercase = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = '''mapillary-vistas-id2label.json'''
__lowercase = json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
return config
def create_rename_keys(config):
"""simple docstring"""
__lowercase = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
"""simple docstring"""
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
__lowercase = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
"""simple docstring"""
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
__lowercase = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
"""simple docstring"""
__lowercase = get_maskformer_config(lowercase )
# load original state_dict
with open(lowercase , '''rb''' ) as f:
__lowercase = pickle.load(lowercase )
__lowercase = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_swin_q_k_v(lowercase , config.backbone_config )
read_in_decoder_q_k_v(lowercase , lowercase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowercase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowercase )
model.eval()
for name, param in model.named_parameters():
print(lowercase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowercase , strict=lowercase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowercase ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65535
else:
__lowercase = 255
__lowercase = True if '''ade''' in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowercase , reduce_labels=lowercase )
__lowercase = image_processor(lowercase , return_tensors='''pt''' )
__lowercase = model(**lowercase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
image_processor.save_pretrained(lowercase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__a : str = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 534 | 0 |
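# Example invocation with the defaults declared above (script name and paths are
# illustrative assumptions):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade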
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs) -> dict:
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
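# Example (file names are illustrative): compares line-aligned prediction/reference files
# and optionally writes the scores as JSON.
#   calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge_scores.json")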
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 713 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
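# Worked example: for n = 2 this computes the central binomial coefficient
# C(4, 2) = 4! / (2! * 2!) = 6, the number of lattice paths through a 2x2 grid.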
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput ):
    '''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput ):
        '''simple docstring'''
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 375 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Any = False, False, False
@dataclass
class Audio:
'''simple docstring'''
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Audio''' , init=False , repr=False )
def __call__( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.pa_type
    def encode_example(self , value: Union[str, bytes, dict] ) -> dict:
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": None, "path": value}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__lowercase = BytesIO()
sf.write(__lowerCamelCase , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__lowercase = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
__lowercase = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32_767
__lowercase = BytesIO(bytes() )
sf.write(__lowerCamelCase , __lowerCamelCase , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example(self , value: dict , token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
'''simple docstring'''
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
__lowercase , __lowercase = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
__lowercase = xsplitext(__lowerCamelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
__lowercase = token_per_repo_id or {}
__lowercase = path.split('::' )[-1]
try:
__lowercase = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )['repo_id']
__lowercase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__lowercase = None
with xopen(__lowerCamelCase , 'rb' , use_auth_token=__lowerCamelCase ) as f:
__lowercase , __lowercase = sf.read(__lowerCamelCase )
else:
__lowercase , __lowercase = sf.read(__lowerCamelCase )
__lowercase = array.T
if self.mono:
__lowercase = librosa.to_mono(__lowerCamelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__lowercase = librosa.resample(__lowerCamelCase , orig_sr=__lowerCamelCase , target_sr=self.sampling_rate )
__lowercase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
    def cast_storage(self , storage: Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
__lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
__lowercase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
__lowercase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
__lowercase = pa.array([Audio().encode_example(__lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
__lowercase = storage.field('bytes' )
else:
__lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
__lowercase = storage.field('path' )
else:
__lowercase = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
    def embed_storage(self , storage: pa.StructArray ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__lowerCamelCase : Any ):
with xopen(__lowerCamelCase , 'rb' ) as f:
__lowercase = f.read()
return bytes_
__lowercase = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowercase = pa.array(
[os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
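# Typical end-user view of this feature (sketch; the wav path is illustrative):
#
#     from datasets import Dataset, Audio
#
#     ds = Dataset.from_dict({"audio": ["path/to/sample.wav"]})
#     ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#     ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}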
| 375 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
    def prepare_config_and_inputs(self ):
'''simple docstring'''
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ):
'''simple docstring'''
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = True
UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        model = FlaxBertModel.from_pretrained('''bert-base-cased''' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
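
# Illustrative usage sketch (not part of the tests above): a minimal forward
# pass with the Flax BERT classes this file exercises. Assumes flax is
# installed; downloading the bert-base-cased weights requires network access.
if __name__ == "__main__":
    from transformers import BertTokenizerFast

    tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
    flax_model = FlaxBertModel.from_pretrained("bert-base-cased")
    encoded = tokenizer("Hello, Flax!", return_tensors="np")
    # last_hidden_state has shape (batch_size, seq_len, hidden_size)
    print(flax_model(**encoded).last_hidden_state.shape)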
| 350 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {'vocab_file': 'spiece.model'}
snake_case_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
snake_case_ : Dict = {
'AI-Sweden/gpt-sw3-126m': 2_048,
'AI-Sweden/gpt-sw3-350m': 2_048,
'AI-Sweden/gpt-sw3-1.6b': 2_048,
'AI-Sweden/gpt-sw3-6.7b': 2_048,
'AI-Sweden/gpt-sw3-20b': 2_048,
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCamelCase = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' if you are testing the model, this can safely be ignored''' )
UpperCamelCase = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCamelCase = '''<|endoftext|>''' if eos_token is None else eos_token
UpperCamelCase = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCamelCase = unk_token if pad_token is None else pad_token
UpperCamelCase = eos_token if bos_token is None else bos_token
else:
UpperCamelCase = '''<pad>''' if pad_token is None else pad_token
UpperCamelCase = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase__ )
# Used for whitespace normalization in input texts
        # fmt: off
UpperCamelCase = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCamelCase = re.compile(
f'[{"".join(map(lowerCamelCase__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = self.non_printing_characters_re.sub('''''' , lowerCamelCase__ )
# Normalize whitespaces
UpperCamelCase = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
UpperCamelCase = unicodedata.normalize('''NFC''' , lowerCamelCase__ )
return text
def UpperCAmelCase ( self , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = self.preprocess_text(lowerCamelCase__ )
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase__ )
@staticmethod
    def clean_up_tokenization( out_string ):
        '''simple docstring'''
        return out_string
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase = self.preprocess_text(lowerCamelCase__ )
UpperCamelCase = self.sp_model.encode(lowerCamelCase__ )
else:
UpperCamelCase = [self.preprocess_text(lowerCamelCase__ ) for t in text]
UpperCamelCase = self.sp_model.encode(lowerCamelCase__ )
if return_tensors is True or return_tensors == "pt":
UpperCamelCase = torch.tensor(lowerCamelCase__ )
return token_ids
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return self.sp_model.decode(lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
UpperCamelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCamelCase__ ) + f'{self.bos_token}Bot:'
)
return self.encode(text=lowerCamelCase__ )
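
# Standalone sketch of the preprocessing this tokenizer applies before
# SentencePiece: drop non-printing control characters, map unusual unicode
# whitespace to a plain space, then NFC-normalize. The whitespace set below
# is a small illustrative subset of the one defined in the class above.
if __name__ == "__main__":
    _non_printing = re.compile(
        f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
    )
    _whitespaces = {"\u2009", "\u202f", "\u3000"}
    _text = "Hej\u2009v\u00e4rlden\u200b"
    _text = _non_printing.sub("", _text)
    _text = "".join(c if c not in _whitespaces else " " for c in _text)
    print(unicodedata.normalize("NFC", _text))  # -> "Hej världen"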
| 350 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
__A : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        __A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.get_tokenizer()
__A : int = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : List[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__A : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__A : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__A : int = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__A : Optional[int] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = self.prepare_image_inputs()
__A : Optional[int] = image_processor(lowerCamelCase , return_tensors="np" )
__A : Dict = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[Any] = "lower newer"
__A : Union[str, Any] = processor(text=lowerCamelCase )
__A : int = tokenizer(lowerCamelCase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Tuple = "lower newer"
__A : int = self.prepare_image_inputs()
__A : str = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Dict = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Optional[int] = processor.batch_decode(lowerCamelCase )
__A : Optional[int] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = "lower newer"
__A : str = self.prepare_image_inputs()
__A : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
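
# Illustrative sketch of what these tests exercise: a processor couples a
# tokenizer with an image processor so text and images are prepared in one
# call. The checkpoint name below is an assumption made for illustration.
if __name__ == "__main__":
    import requests

    align_processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
    cats = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    batch = align_processor(text="two cats on a couch", images=cats, return_tensors="np")
    print(sorted(batch.keys()))  # input_ids, token_type_ids, attention_mask, pixel_values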
| 111 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
__A : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        __A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.get_tokenizer()
__A : int = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : List[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__A : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__A : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__A : int = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__A : Optional[int] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = self.prepare_image_inputs()
__A : Optional[int] = image_processor(lowerCamelCase , return_tensors="np" )
__A : Dict = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[Any] = "lower newer"
__A : Union[str, Any] = processor(text=lowerCamelCase )
__A : int = tokenizer(lowerCamelCase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Tuple = "lower newer"
__A : int = self.prepare_image_inputs()
__A : str = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Dict = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Optional[int] = processor.batch_decode(lowerCamelCase )
__A : Optional[int] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = "lower newer"
__A : str = self.prepare_image_inputs()
__A : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 111 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
_UpperCamelCase = list[list[float | int]]
def _lowerCAmelCase( UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ) -> Matrix:
lowerCAmelCase__ = len(UpperCAmelCase_ )
lowerCAmelCase__ = [[0 for _ in range(size + 1 )] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for row in range(UpperCAmelCase_ ):
for col in range(UpperCAmelCase_ ):
lowerCAmelCase__ = matrix[row][col]
lowerCAmelCase__ = vector[row][0]
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
while row < size and col < size:
# pivoting
lowerCAmelCase__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCAmelCase_ , UpperCAmelCase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCAmelCase__ ,lowerCAmelCase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , UpperCAmelCase_ ):
lowerCAmelCase__ = augmented[rowa][col] / augmented[row][col]
lowerCAmelCase__ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , UpperCAmelCase_ ):
for row in range(UpperCAmelCase_ ):
lowerCAmelCase__ = augmented[row][col] / augmented[col][col]
for cola in range(UpperCAmelCase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(UpperCAmelCase_ )
]
def _lowerCAmelCase( UpperCAmelCase_ : list[int] ) -> Callable[[int], int]:
lowerCAmelCase__ = len(UpperCAmelCase_ )
lowerCAmelCase__ = [[0 for _ in range(UpperCAmelCase_ )] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = [[0] for _ in range(UpperCAmelCase_ )]
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for x_val, y_val in enumerate(UpperCAmelCase_ ):
for col in range(UpperCAmelCase_ ):
lowerCAmelCase__ = (x_val + 1) ** (size - col - 1)
lowerCAmelCase__ = y_val
lowerCAmelCase__ = solve(UpperCAmelCase_ , UpperCAmelCase_ )
def interpolated_func(UpperCAmelCase_ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCAmelCase_ ) )
return interpolated_func
def _lowerCAmelCase( UpperCAmelCase_ : int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _lowerCAmelCase( UpperCAmelCase_ : Callable[[int], int] = question_function , UpperCAmelCase_ : int = 10 ) -> int:
lowerCAmelCase__ = [func(UpperCAmelCase_ ) for x_val in range(1 , order + 1 )]
lowerCAmelCase__ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCAmelCase__ = 0
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for poly in polynomials:
lowerCAmelCase__ = 1
while func(UpperCAmelCase_ ) == poly(UpperCAmelCase_ ):
x_val += 1
ret += poly(UpperCAmelCase_ )
return ret
if __name__ == "__main__":
print(f'{solution() = }')
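
# Alternative sketch of the interpolation step using exact rational Lagrange
# interpolation (stdlib fractions) instead of floating-point Gaussian
# elimination; exact arithmetic sidesteps the rounding handled above.
if __name__ == "__main__":
    from fractions import Fraction

    def lagrange(ys):
        # the unique degree len(ys)-1 polynomial through (1, ys[0]), ..., (k, ys[k-1])
        k = len(ys)

        def p(n):
            total = Fraction(0)
            for i in range(k):
                term = Fraction(ys[i])
                for j in range(k):
                    if j != i:
                        term *= Fraction(n - (j + 1), (i + 1) - (j + 1))
                total += term
            return total

        return p

    cubes = [n**3 for n in range(1, 4)]  # 1, 8, 27
    op_2 = lagrange(cubes[:2])  # OP(2, n) fits the first two terms: 7n - 6
    print(op_2(3))  # 15, the classic first incorrect term for the cubes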
| 714 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
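
# Minimal sketch of the lazy-import idea behind _LazyModule: defer the real
# import until an attribute is first needed (toy example using the stdlib).
if __name__ == "__main__":
    import importlib

    def lazy_get(module_name, attr_name):
        # the import only happens here, on first access
        return getattr(importlib.import_module(module_name), attr_name)

    dumps = lazy_get("json", "dumps")
    print(dumps({"lazy": True}))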
| 211 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def snake_case_ ( A_ : List[Any], A_ : Tuple ):
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_lowerCamelCase : Any = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
_lowerCamelCase : Dict = in_proj_weight[
: encoder_config.hidden_size, :
]
_lowerCamelCase : Dict = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_lowerCamelCase : str = in_proj_weight[
-encoder_config.hidden_size :, :
]
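# Shape bookkeeping for the split above: with hidden_size = 768 the fused
# timm-style qkv weight has shape (3 * 768, 768) = (2304, 768) and is sliced
# row-wise into three (768, 768) blocks -- [0:768] for query, [768:1536] for
# key, and [-768:] (i.e. [1536:2304]) for value.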
def snake_case_ ( A_ : List[str], A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : str = dct.pop(A_ )
_lowerCamelCase : str = val
def snake_case_ ( A_ : Dict ):
'''simple docstring'''
if "handwritten" in checkpoint_url:
_lowerCamelCase : int = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCamelCase : List[str] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
_lowerCamelCase : Dict = Image.open(requests.get(A_, stream=A_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def snake_case_ ( A_ : Tuple, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = ViTConfig(image_size=3_84, qkv_bias=A_ )
_lowerCamelCase : int = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_lowerCamelCase : Any = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
_lowerCamelCase : str = 10_24
_lowerCamelCase : List[str] = 40_96
_lowerCamelCase : Any = 24
_lowerCamelCase : Union[str, Any] = 16
_lowerCamelCase : str = 10_24
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[Any] = 10_24
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = False
_lowerCamelCase : Tuple = False
# load HuggingFace model
_lowerCamelCase : List[Any] = ViTModel(A_, add_pooling_layer=A_ )
_lowerCamelCase : List[str] = TrOCRForCausalLM(A_ )
_lowerCamelCase : List[Any] = VisionEncoderDecoderModel(encoder=A_, decoder=A_ )
model.eval()
# load state_dict of original model, rename some keys
_lowerCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(A_, map_location='''cpu''', check_hash=A_ )['''model''']
_lowerCamelCase : Tuple = create_rename_keys(A_, A_ )
for src, dest in rename_keys:
rename_key(A_, A_, A_ )
read_in_q_k_v(A_, A_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_lowerCamelCase : Union[str, Any] = state_dict.pop(A_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
_lowerCamelCase : Dict = val
else:
_lowerCamelCase : Dict = val
# load state dict
model.load_state_dict(A_ )
# Check outputs on an image
_lowerCamelCase : str = ViTImageProcessor(size=encoder_config.image_size )
_lowerCamelCase : List[str] = RobertaTokenizer.from_pretrained('''roberta-large''' )
_lowerCamelCase : List[str] = TrOCRProcessor(A_, A_ )
_lowerCamelCase : Dict = processor(images=prepare_img(A_ ), return_tensors='''pt''' ).pixel_values
# verify logits
_lowerCamelCase : str = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_lowerCamelCase : Tuple = model(pixel_values=A_, decoder_input_ids=A_ )
_lowerCamelCase : Optional[Any] = outputs.logits
_lowerCamelCase : str = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
_lowerCamelCase : Dict = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowerCamelCase : Optional[int] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_lowerCamelCase : Tuple = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_lowerCamelCase : Tuple = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], A_, atol=1E-3 ), "First elements of logits not as expected"
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 83 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowercase = logging.get_logger(__name__)
class _lowercase :
def __init__( self , A__ , A__ ) -> Tuple:
snake_case = question_encoder
snake_case = generator
snake_case = self.question_encoder
    def save_pretrained( self , save_directory ) -> None:
        if os.path.isfile(save_directory ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
def UpperCamelCase ( cls , A__ , **A__ ) -> List[Any]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
snake_case = kwargs.pop('''config''' , A__ )
if config is None:
snake_case = RagConfig.from_pretrained(A__ )
snake_case = AutoTokenizer.from_pretrained(
A__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case = AutoTokenizer.from_pretrained(
A__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=A__ , generator=A__ )
def __call__( self , *A__ , **A__ ) -> Any:
return self.current_tokenizer(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple:
return self.generator.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple:
return self.generator.decode(*A__ , **A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = self.question_encoder
def UpperCamelCase ( self ) -> str:
snake_case = self.generator
def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = "longest" , A__ = None , A__ = True , **A__ , ) -> BatchEncoding:
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , A__ , )
if max_length is None:
snake_case = self.current_tokenizer.model_max_length
snake_case = self(
A__ , add_special_tokens=A__ , return_tensors=A__ , max_length=A__ , padding=A__ , truncation=A__ , **A__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case = self.current_tokenizer.model_max_length
snake_case = self(
text_target=A__ , add_special_tokens=A__ , return_tensors=A__ , padding=A__ , max_length=A__ , truncation=A__ , **A__ , )
snake_case = labels['''input_ids''']
return model_inputs
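
# Schematic usage (assuming the class above is transformers' RagTokenizer,
# whose real entry points are `from_pretrained` and `__call__`):
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
# Encoding goes through the question-encoder tokenizer; decoding generations
# goes through the generator tokenizer.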
| 342 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="""utf-8""") as f:
        results = json.load(f)
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""")[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''')
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""", None)
            dif_val = metric_vals.get("""diff""", None)
            val_str = F''' {new_val:f}''' if isinstance(new_val, (int, float)) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, """ """]
    output_md.append("""</details>""")
    with open(output_md_file, """w""", encoding="""utf-8""") as f:
        f.writelines("""\n""".join(output_md))
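
# Input shape expected by format_json_to_md, inferred from the code above:
# {
#     "benchmark_results/benchmark_map.json": {
#         "map/time": {"new": 0.91, "old": 0.89, "diff": 0.02},
#     },
# }
# Each benchmark becomes a "### Benchmark: ..." heading followed by a one-row
# markdown table of "new / old (diff)" values per metric.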
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 517 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 517 | 1 |
'''simple docstring'''
def solution(n: int = 1_00) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
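
# Worked check: for n = 10 the square of the sum is 55**2 = 3025 and the sum
# of the squares is 385, so solution(10) == 2640.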
if __name__ == "__main__":
print(F'{solution() = }')
| 384 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
snake_case = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Optional[Any] ):
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = eval_examples
SCREAMING_SNAKE_CASE : List[Any] = post_process_function
SCREAMING_SNAKE_CASE : Any = quant_trainer_args
SCREAMING_SNAKE_CASE : Optional[Any] = 128 # default number of calibration samples
def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
SCREAMING_SNAKE_CASE : str = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE : str = self._remove_unused_columns(UpperCAmelCase_ , description="Calibration" )
return DataLoader(
UpperCAmelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase_ , )
def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[int]=None ):
SCREAMING_SNAKE_CASE : Any = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_calib_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.model
quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args , calib=UpperCAmelCase_ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase_ )
logger.info("***** Running calibration *****" )
logger.info(f''' Num examples = {self.calib_num}''' )
logger.info(f''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(UpperCAmelCase_ ):
# Prediction step
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.prediction_step(UpperCAmelCase_ , UpperCAmelCase_ , prediction_loss_only=UpperCAmelCase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : Optional[int] = model
def _A ( self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str = "eval" ):
SCREAMING_SNAKE_CASE : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE : Tuple = self.get_eval_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : int = eval_loop(
UpperCAmelCase_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , )
finally:
SCREAMING_SNAKE_CASE : int = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions )
SCREAMING_SNAKE_CASE : Any = self.compute_metrics(UpperCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = metrics.pop(UpperCAmelCase_ )
self.log(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase_ )
return metrics
def _A ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str = "test" ):
SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(UpperCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE : str = eval_loop(
UpperCAmelCase_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , )
finally:
SCREAMING_SNAKE_CASE : Union[str, Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE : Optional[Any] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions , "predict" )
SCREAMING_SNAKE_CASE : str = self.compute_metrics(UpperCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE : str = metrics.pop(UpperCAmelCase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : int="./" ):
SCREAMING_SNAKE_CASE : List[Any] = self.eval_dataset
SCREAMING_SNAKE_CASE : List[Any] = self.get_eval_dataloader(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = next(iter(UpperCAmelCase_ ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE : int = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
SCREAMING_SNAKE_CASE : Tuple = tuple(v.to(UpperCAmelCase_ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Any = self.model.to(UpperCAmelCase_ )
model.eval()
model.float()
SCREAMING_SNAKE_CASE : str = model.module if hasattr(UpperCAmelCase_ , "module" ) else model
quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCAmelCase_ , "model.onnx" )
logger.info(f'''exporting model to {output_model_file}''' )
SCREAMING_SNAKE_CASE : int = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , export_params=UpperCAmelCase_ , opset_version=13 , do_constant_folding=UpperCAmelCase_ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=UpperCAmelCase_ , )
logger.info("onnx export finished" )
| 62 | 0 |
'''simple docstring'''
def _A (lowerCAmelCase__ :int = 1_00_00_00 ) -> int:
'''simple docstring'''
_a = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowerCAmelCase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 532 |
'''simple docstring'''
from functools import reduce
a_ : Any = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _A (lowerCAmelCase__ :str = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase__ , lowerCAmelCase__ : str(int(lowerCAmelCase__ ) * int(lowerCAmelCase__ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase__ ) - 12 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 532 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_UpperCAmelCase : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 107 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase__ : List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : List[str] = min_resolution
UpperCAmelCase__ : Optional[Any] = max_resolution
UpperCAmelCase__ : List[str] = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean
UpperCAmelCase__ : Dict = image_std
UpperCAmelCase__ : Any = do_rescale
UpperCAmelCase__ : str = rescale_factor
UpperCAmelCase__ : List[str] = do_pad
def snake_case__ ( self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=False):
if not batched:
UpperCAmelCase__ : List[Any] = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image):
UpperCAmelCase__ , UpperCAmelCase__ : str = image.size
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase__ : List[Any] = int(self.size["""shortest_edge"""] * h / w)
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
elif w > h:
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h)
else:
UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""]
UpperCAmelCase__ : int = self.size["""shortest_edge"""]
else:
UpperCAmelCase__ : str = []
for image in image_inputs:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
UpperCAmelCase__ : Any = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[0])[0]
UpperCAmelCase__ : List[Any] = max(_lowerCamelCase , key=lambda _lowerCamelCase: item[1])[1]
return expected_height, expected_width
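    # Resize rule illustrated by get_expected_values: the shorter image side is
    # scaled to size["shortest_edge"] and the other side is scaled by the same
    # factor (int-truncated), preserving the aspect ratio; batched inputs then
    # pad up to the per-batch maxima computed above.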
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = DeformableDetrImageProcessingTester(self)
@property
def snake_case__ ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowerCamelCase , """image_mean"""))
self.assertTrue(hasattr(_lowerCamelCase , """image_std"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_normalize"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_resize"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_rescale"""))
self.assertTrue(hasattr(_lowerCamelCase , """do_pad"""))
self.assertTrue(hasattr(_lowerCamelCase , """size"""))
def snake_case__ ( self):
UpperCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333})
self.assertEqual(image_processor.do_pad , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCamelCase)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84})
self.assertEqual(image_processor.do_pad , _lowerCamelCase)
def snake_case__ ( self):
pass
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image)
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray)
# Test not batched input
UpperCAmelCase__ : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : Optional[Any] = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case__ ( self):
# Initialize image_processing
UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase)
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor)
# Test not batched input
UpperCAmelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase__ : int = image_processing(_lowerCamelCase , return_tensors="""pt""").pixel_values
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case__ ( self):
# prepare image and target
UpperCAmelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f:
UpperCAmelCase__ : Dict = json.loads(f.read())
UpperCAmelCase__ : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
UpperCAmelCase__ : Dict = DeformableDetrImageProcessor()
UpperCAmelCase__ : int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors="""pt""")
# verify pixel values
UpperCAmelCase__ : Tuple = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4))
# verify area
UpperCAmelCase__ : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase))
# verify boxes
UpperCAmelCase__ : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3))
# verify image_id
UpperCAmelCase__ : Optional[int] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase))
# verify is_crowd
UpperCAmelCase__ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase))
# verify class_labels
UpperCAmelCase__ : Any = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase))
# verify orig_size
UpperCAmelCase__ : int = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase))
# verify size
UpperCAmelCase__ : List[Any] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
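    # For reference: each raw COCO annotation fed in above is assumed to carry
    # "bbox" in [x, y, width, height] pixels plus "area", "iscrowd" and
    # "category_id"; the processor converts boxes to normalized (cx, cy, w, h).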
@slow
def snake_case__ ( self):
# prepare image, target and masks_path
UpperCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f:
UpperCAmelCase__ : Optional[int] = json.loads(f.read())
UpperCAmelCase__ : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
UpperCAmelCase__ : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
# encode them
UpperCAmelCase__ : List[str] = DeformableDetrImageProcessor(format="""coco_panoptic""")
UpperCAmelCase__ : Tuple = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors="""pt""")
# verify pixel values
UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4))
# verify area
UpperCAmelCase__ : str = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCamelCase))
# verify boxes
UpperCAmelCase__ : List[str] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCamelCase)
UpperCAmelCase__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCamelCase , atol=1e-3))
# verify image_id
UpperCAmelCase__ : Tuple = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCamelCase))
# verify is_crowd
UpperCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCamelCase))
# verify class_labels
UpperCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCamelCase))
# verify masks
UpperCAmelCase__ : Dict = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCamelCase)
# verify orig_size
UpperCAmelCase__ : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCamelCase))
# verify size
UpperCAmelCase__ : int = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCamelCase))
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = LongformerTokenizer
__snake_case = True
__snake_case = LongformerTokenizerFast
__snake_case = True
def UpperCamelCase__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case_ = dict(zip(A__ , range(len(A__ ) ) ) )
snake_case_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ = {"""unk_token""": """<unk>"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = """lower newer"""
snake_case_ = """lower newer"""
return input_text, output_text
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = """lower newer"""
snake_case_ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case_ = tokenizer.tokenize(A__ ) # , add_prefix_space=True)
self.assertListEqual(A__ , A__ )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
def UpperCamelCase__ ( self ):
snake_case_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=A__ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=A__ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=A__ )
snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A__ )
snake_case_ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case_ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(A__ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
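    # Longformer reuses the RoBERTa special-token layout: <s> A </s> for a
    # single sequence and <s> A </s></s> B </s> for a pair, which is what the
    # build_inputs_with_special_tokens calls above reproduce.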
def UpperCamelCase__ ( self ):
snake_case_ = self.get_tokenizer()
snake_case_ = """Encode this sequence."""
snake_case_ = tokenizer.byte_encoder[""" """.encode('''utf-8''' )[0]]
# Testing encoder arguments
snake_case_ = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A__ , A__ )
snake_case_ = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A__ , A__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
snake_case_ = tokenizer.encode(A__ , add_special_tokens=A__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A__ , A__ )
# Testing spaces after special tokens
snake_case_ = """<mask>"""
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A__ , lstrip=A__ , rstrip=A__ )} ) # mask token has a left space
snake_case_ = tokenizer.convert_tokens_to_ids(A__ )
snake_case_ = """Encode <mask> sequence"""
snake_case_ = """Encode <mask>sequence"""
snake_case_ = tokenizer.encode(A__ )
snake_case_ = encoded.index(A__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A__ , A__ )
snake_case_ = tokenizer.encode(A__ )
snake_case_ = encoded.index(A__ )
snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A__ , A__ )
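    # Because the mask token was registered with lstrip=True, it absorbs the
    # space to its left; the token following the mask therefore only carries a
    # leading-space marker when the input itself has a space after "<mask>".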
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ = self.tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ = """A, <mask> AllenNLP sentence."""
snake_case_ = tokenizer_r.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
snake_case_ = tokenizer_p.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCamelCase__ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , A__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , A__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , A__ )
def UpperCamelCase__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ), len(A__ ) + 1 + len(A__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ), len(A__ ) + 1 + len(A__ )) , )
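                # The four variants above differ only in whether the reported
                # span of the second word includes its leading space: with
                # offset trimming the span starts one character later.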
snake_case_ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ) + 1, 1 + len(A__ ) + 1 + len(A__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ), 1 + len(A__ ) + 1 + len(A__ )) , )
snake_case_ = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , add_prefix_space=A__ , trim_offsets=A__ )
snake_case_ = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(A__ ), 1 + len(A__ ) + 1 + len(A__ )) , )
def solution(numerator: int = 3, denominator: int = 7, limit: int = 100_0000) -> int:
    """Return the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to `limit`
    (Project Euler 71)."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
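# For the defaults 3/7 with limit 1_000_000 the loop settles on 428570/999997,
# so solution() returns 428570.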
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=100_0000))
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__snake_case : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : int=3 , _lowerCamelCase : Optional[int]=1_8 , _lowerCamelCase : int=3_0 , _lowerCamelCase : int=4_0_0 , _lowerCamelCase : List[str]=None , _lowerCamelCase : List[str]=True , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=None , ):
A__ = size if size is not None else {'''height''': 2_0, '''width''': 2_0}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = size
A__ = do_normalize
A__ = do_convert_rgb
A__ = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
A__ = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
def A__ ( self : Optional[int] ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A__ ( self : Optional[Any] ):
A__ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int =PixaStructImageProcessor if is_vision_available() else None
def A__ ( self : int ):
A__ = PixaStructImageProcessingTester(self )
@property
def A__ ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : Any ):
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_convert_rgb''' ) )
def A__ ( self : Tuple ):
A__ = self.image_processor_tester.prepare_dummy_image()
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = 2_0_4_8
A__ = image_processor(_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1E-3 , rtol=1E-3 ) )
def A__ ( self : Dict ):
# Initialize image_processor
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
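        # Each flattened patch row stores the raw patch pixels
        # (patch_height * patch_width * num_channels values) plus two extra
        # entries for the patch's row and column index, hence the "+ 2".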
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A__ = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self : List[str] ):
# Initialize image_processor
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
A__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_lowerCamelCase ):
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
A__ = '''Hello'''
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase , header_text=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A__ = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase , header_text=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self : Tuple ):
# Initialize image_processor
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
A__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A__ = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A__ ( self : Dict ):
# Initialize image_processor
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A__ = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCamelCase ( a , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str =PixaStructImageProcessor if is_vision_available() else None
def A__ ( self : List[Any] ):
A__ = PixaStructImageProcessingTester(self , num_channels=4 )
A__ = 3
@property
def A__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self : List[Any] ):
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_convert_rgb''' ) )
def A__ ( self : str ):
# Initialize image_processor
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A__ = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
A__ = image_processor(
_lowerCamelCase , return_tensors='''pt''' , max_patches=_lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__snake_case : List[Any] = logging.get_logger(__name__)
class UpperCamelCase ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : str ):
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : Optional[Any] ) -> Dict:
_lowerCamelCase = MobileNetVaConfig(layer_norm_eps=0.001 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
_lowerCamelCase = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , lowerCAmelCase__ )
if matches:
_lowerCamelCase = float(matches[1] )
_lowerCamelCase = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
_lowerCamelCase = 10_01
_lowerCamelCase = 'imagenet-1k-id2label.json'
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) )
    _lowerCamelCase = {int(k ) + 1: v for k, v in idalabel.items()}
_lowerCamelCase = 'background'
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( ) -> Optional[int]:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[Any]=False ) -> Any:
_lowerCamelCase = get_mobilenet_va_config(lowerCAmelCase__ )
# Load 🤗 model
_lowerCamelCase = MobileNetVaForImageClassification(lowerCAmelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_lowerCamelCase = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCamelCase = model(**lowerCAmelCase__ )
_lowerCamelCase = outputs.logits
assert logits.shape == (1, 10_01)
if model_name == "mobilenet_v1_1.0_224":
_lowerCamelCase = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
elif model_name == "mobilenet_v1_0.75_192":
_lowerCamelCase = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
else:
_lowerCamelCase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
_lowerCamelCase = 'google/' + model_name
image_processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : Any=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : List[Any]=False ) -> List[str]:
_lowerCamelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : str ) -> Tuple:
for i in range(config.num_hidden_layers ):
_lowerCamelCase = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
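# Note: timm-style ViT attention stores query, key and value as one fused
# (3 * hidden_size, hidden_size) projection; the slices above peel off the
# first, middle and last hidden_size rows into separate q/k/v weights.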
def lowerCamelCase ( UpperCamelCase : Tuple ) -> List[Any]:
_lowerCamelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def lowerCamelCase ( UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : str ) -> Any:
_lowerCamelCase = dct.pop(UpperCamelCase )
_lowerCamelCase = val
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : int ) -> Optional[int]:
_lowerCamelCase = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
if "vqa" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = 31_29
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = 'vqa2-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) )
        _lowerCamelCase = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
_lowerCamelCase = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = 2
_lowerCamelCase = {0: 'False', 1: 'True'}
_lowerCamelCase = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase = 3
_lowerCamelCase = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase = True
_lowerCamelCase = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location='cpu' )['state_dict']
_lowerCamelCase = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
_lowerCamelCase = ViltImageProcessor(size=3_84 )
_lowerCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCamelCase = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCamelCase ).raw )
_lowerCamelCase = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCamelCase ).raw )
_lowerCamelCase = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=UpperCamelCase ).raw )
if mlm_model:
_lowerCamelCase = 'a bunch of [MASK] laying on a [MASK].'
else:
_lowerCamelCase = 'How many cats are there?'
_lowerCamelCase = processor(UpperCamelCase , UpperCamelCase , return_tensors='pt' )
_lowerCamelCase = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase = torch.Size([1, 11, 3_05_22] )
_lowerCamelCase = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase = torch.Size([1, 31_29] )
_lowerCamelCase = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase = torch.Size([1, 2] )
_lowerCamelCase = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
A = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move `height` disks from `from_pole` to `to_pole`, using
    `with_pole` as the spare peg."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(from_pole: str, to_pole: str) -> None:
    """Print a single disk move."""
    print('moving disk from', from_pole, 'to', to_pole)
def main() -> None:
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')
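# Moving n disks takes 2**n - 1 moves, so a height of 3 prints seven
# "moving disk from ... to ..." lines.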
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A_ : List[Any] = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
A_ : Tuple = parser.parse_args()
A_ : Optional[int] = 'cpu'
A_ : Dict = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
A_ : List[str] = 'path-to-your-trained-model'
A_ : Any = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A_ : Tuple = pipe.to(device)
# to channels last
A_ : List[str] = pipe.unet.to(memory_format=torch.channels_last)
A_ : List[str] = pipe.vae.to(memory_format=torch.channels_last)
A_ : Optional[int] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A_ : Union[str, Any] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A_ : List[Any] = torch.randn(2, 4, 64, 64)
A_ : int = torch.rand(1) * 999
A_ : int = torch.randn(2, 77, 768)
A_ : Tuple = (sample, timestep, encoder_hidden_status)
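# The example tensors mirror the UNet's call signature for 512x512 images:
# a (2, 4, 64, 64) latent batch, a scalar timestep, and (2, 77, 768)
# CLIP text-encoder hidden states, used only to trace the model for IPEX.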
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a_ : List[Any] = False
class _snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy').images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a)
SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained(a)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = generator.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy').images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE = torch.manual_seed(0)
SCREAMING_SNAKE_CASE = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy').images
SCREAMING_SNAKE_CASE = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def solution() -> int:
    """Count how many words in words.txt are triangle words (Project Euler 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
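# Example: "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number, so it
# counts as a triangle word.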
if __name__ == "__main__":
print(solution())
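# Precomputed denoising timestep schedules, each descending from 999 to 0.
# They appear to correspond to progressively finer num_inference_steps
# settings of a diffusion scheduler: the gap between consecutive timesteps
# shrinks from list to list.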
SCREAMING_SNAKE_CASE : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE : Optional[int] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE : Tuple = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCREAMING_SNAKE_CASE : List[Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCREAMING_SNAKE_CASE : List[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCREAMING_SNAKE_CASE : int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = ['''input_ids''', '''attention_mask''']
A : List[Any] = DistilBertTokenizer
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
'''simple docstring'''
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', A ) != do_lower_case
or normalizer_state.get('strip_accents', A ) != strip_accents
or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
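    # BERT-style layout: [CLS] A [SEP] for a single sequence and
    # [CLS] A [SEP] B [SEP] for a pair.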
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
return tuple(A )
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
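        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches -> seq_length 226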
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # the [CLS] position is excluded, hence seq_length - 1 predictions
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
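
# `BeitModelTester` (above) builds tiny configs and random inputs; the TestCase
# below drives them through the shared ModelTesterMixin/PipelineTesterMixin checks.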
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
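    # Gradient checkpointing trades compute for memory: activations are recomputed
    # during backward instead of being stored, so the test above only checks that
    # a backward pass still runs with it enabled.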
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# test image shipped with the repo fixtures
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
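
# The integration tests below load real checkpoints and compare a few output
# values against precomputed reference slices, using absolute tolerances to
# absorb small hardware/backend numerical differences.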
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos: mask all 196 patches of the 14x14 grid
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits: one 8192-way visual-token prediction per masked patch
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
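        # index 281 is the "tabby, tabby cat" class in the ImageNet-1k label map,
        # matching the cats test image (label-map detail assumed, not in the source)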
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
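        # the 21841-way head matches the full ImageNet-22k label inventory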
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
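        # The expected values are gated on the Pillow version because resizing
        # output changed across Pillow releases, which shifts the logits slightly.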
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        # with target_sizes, predictions are rescaled to the requested resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # without target_sizes, predictions stay at the model's output resolution
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 195 | 1 |
"""Tests for the Stable Diffusion Attend-and-Excite pipeline."""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# restored by assumption from the diffusers test-suite convention; the obfuscated
# source only shows a module-level `... = False` here
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # Attend-and-Excite runs a backward pass at inference time, and some backward
    # kernels have no deterministic implementation. The True/False arguments below
    # are restored by assumption (the obfuscated source only left placeholders):
    # disable determinism for these tests, re-enable it afterwards.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        # tiny UNet; `use_linear_projection=True` is restored by assumption (the
        # obfuscated source only left a placeholder for this argument)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
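    # All components above are deliberately tiny (32-dim UNet/VAE/text encoder)
    # so that the fast tests run in seconds on CPU.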
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
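    # "token_indices" [2, 5] point at the "cat" and "frog" tokens of the tokenized
    # prompt; Attend-and-Excite optimizes the latents for up to "max_iter_to_alter"
    # steps so that cross-attention on those tokens clears the given "thresholds".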
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # see the note on deterministic algorithms in the fast-test class above
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 195 | 1 |