| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
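Each row pairs a `code` sample with a `style_context` sample and a binary `label` indicating whether the two share a coding style; the `*_codestyle` columns are integer style-cluster ids. A minimal sketch of how such a dataset could be inspected, assuming it is hosted on the Hugging Face Hub (`your-org/codestyle-pairs` is a placeholder, not the real dataset id):

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual Hub path.
ds = load_dataset("your-org/codestyle-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 chars of the code sample
```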
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        'artist': 'Zac Brown Band',
        'genres': 'Country',
        'lyrics': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
    }
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
            torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
code_codestyle: 103
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first ``n`` lines of every file in ``src_dir`` to ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
    fire.Fire(minify)
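For context, `fire` maps positional command-line arguments onto the function signature, so the script and a direct call behave the same. A hypothetical invocation (the file and directory names are placeholders):

```python
# Equivalent ways to keep the first 5 lines of each file:
#   $ python minify.py path/to/src path/to/dest 5
minify("path/to/src", "path/to/dest", 5)
```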
style_context_codestyle: 103, label: 1
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetV2ModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
                 depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32,
                 first_layer_is_expansion=True, finegrained_output=True, tf_padding=True,
                 hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1,
                 initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio,
            output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetV2ImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
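A quick sanity check on the expected segmentation shape above: DeepLabV3-MobileNetV2 at a 513×513 input with an effective output stride of 8 produces 65×65 logits, and the PASCAL VOC head has 21 classes. A minimal sketch of the arithmetic (the `math.ceil` framing is illustrative, not taken from the test file):

```python
import math

input_size = 513
output_stride = 8  # DeepLabV3 heads typically run the backbone at stride 8
num_classes = 21   # PASCAL VOC: 20 classes + background

spatial = math.ceil(input_size / output_stride)  # -> 65
print((1, num_classes, spatial, spatial))        # (1, 21, 65, 65)
```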
code_codestyle: 103
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample',
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy')
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark',
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='np',
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
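For orientation: with `output_type='np'` the pipeline returns a stack of rendered views per prompt, so one 3D asset comes back as an array of shape (num_frames, frame_size, frame_size, 3) — 20 frames in these tests. A quick shape check, assuming a pipeline built from the dummy components above:

```python
# Sketch only: `pipe` as assembled from get_dummy_components() above.
output = pipe(prompt="horse", num_inference_steps=1, frame_size=32, output_type="np")
frames = output.images[0]              # one prompt -> 20 rendered views
assert frames.shape == (20, 32, 32, 3)
```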
style_context_codestyle: 103, label: 1
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token,
            do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(' ', '<SP>')
        text = text.replace('　', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
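Worth noting: characters outside the vocabulary fall back to UTF-8 byte tokens of the form `<|byte%d|>`, which `convert_id_to_token` buffers and decodes back into text. A small sketch of that round trip, standalone and independent of the tokenizer class above:

```python
# UTF-8 byte fallback round trip.
text = "猫"                                   # 3 UTF-8 bytes
byte_tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
print(byte_tokens)                            # ['<|byte231|>', '<|byte140|>', '<|byte171|>']
decoded = bytearray(int(t[6:-2]) for t in byte_tokens).decode("utf-8")
assert decoded == text
```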
code_codestyle: 221
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
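A minimal sketch of driving one of these blocks through Flax's functional API; the tensor shapes are illustrative (diffusers' Flax UNet blocks use NHWC layout), and the time-embedding width is an assumption since the resnet projects it with a Dense layer:

```python
import jax
import jax.numpy as jnp

block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
sample = jnp.zeros((1, 16, 16, 32))   # NHWC
temb = jnp.zeros((1, 128))            # time embedding; width is illustrative
params = block.init(jax.random.PRNGKey(0), sample, temb)
hidden_states, output_states = block.apply(params, sample, temb)
```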
style_context_codestyle: 199, label: 0
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in the input strings,
    # increment the count for one and decrement it for the other
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
    print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
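A quick worked example of the counting logic (inputs chosen for illustration): for 'silent' vs 'listen' every character's net count ends at zero, so the function returns True.

```python
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("New York Times", "monkeys write") is True  # classic example
assert check_anagrams("hello", "world") is False
```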
code_codestyle: 178
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
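One detail worth calling out: InstructBlip runs two tokenizers, so the processor prefixes the Q-Former encoding's keys with `qformer_` to keep them apart from the language model's. A minimal sketch of consuming that output (the prompt text is illustrative):

```python
# Sketch: same processor and image_input as constructed in the tests above.
inputs = processor(text="describe the image", images=image_input, return_tensors="pt")
lm_ids = inputs["input_ids"]                # language-model tokens
qformer_ids = inputs["qformer_input_ids"]   # Q-Former (BERT) tokens
pixels = inputs["pixel_values"]             # image tensor from BlipImageProcessor
```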
style_context_codestyle: 178, label: 1
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
    webbrowser.open(link)
code_codestyle: 104
"""simple docstring"""
import string
def lowerCamelCase__ ( _lowerCamelCase : str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
lowerCamelCase_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCamelCase_ = string.ascii_uppercase.find(_lowerCamelCase )
lowerCamelCase_ = num - key
if num < 0:
lowerCamelCase_ = num + len(string.ascii_uppercase )
lowerCamelCase_ = translated + string.ascii_uppercase[num]
else:
lowerCamelCase_ = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = input('Encrypted message: ' )
lowerCamelCase_ = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
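A quick worked example of the shift arithmetic (the message is chosen for illustration): 'KHOOR' decrypted with key 3 shifts each letter back three places, K→H, H→E, O→L, O→L, R→O, giving 'HELLO'.

```python
# One iteration of the loop above, for key=3 and message 'KHOOR':
import string

message, key = 'KHOOR', 3
plain = ''.join(
    string.ascii_uppercase[(string.ascii_uppercase.find(c) - key) % 26]
    for c in message
)
assert plain == 'HELLO'
```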
style_context_codestyle: 183, label: 0
"""simple docstring"""
import argparse
import copy
def _snake_case ( lowercase__ ):
_lowerCamelCase : Union[str, Any] = {}
with open(lowerCamelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCamelCase : Dict = []
_list.append([line.split()[1], line.split()[2]] )
_lowerCamelCase : List[str] = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCamelCase : str = []
_list.append([line.split()[0], line.split()[2]] )
_lowerCamelCase : Union[str, Any] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Union[str, Any] = []
for n in solution[1:-1]:
_lowerCamelCase : Optional[Any] = solution.index(lowerCamelCase_ )
for kn in solution[1:-1]:
_lowerCamelCase : Any = solution.index(lowerCamelCase_ )
if n == kn:
continue
_lowerCamelCase : int = copy.deepcopy(lowerCamelCase_ )
_lowerCamelCase : List[Any] = kn
_lowerCamelCase : Any = n
_lowerCamelCase : List[Any] = 0
for k in _tmp[:-1]:
_lowerCamelCase : str = _tmp[_tmp.index(lowerCamelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCamelCase : List[Any] = distance + int(i[1] )
_tmp.append(lowerCamelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCamelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowercase__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = first_solution
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Optional[Any] = distance_of_first_solution
_lowerCamelCase : Optional[Any] = solution
while count <= iters:
_lowerCamelCase : Union[str, Any] = find_neighborhood(lowerCamelCase_ , lowerCamelCase_ )
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = neighborhood[index_of_best_solution]
_lowerCamelCase : int = len(lowerCamelCase_ ) - 1
_lowerCamelCase : int = False
while not found:
_lowerCamelCase : Any = 0
while i < len(lowerCamelCase_ ):
if best_solution[i] != solution[i]:
_lowerCamelCase : List[str] = best_solution[i]
_lowerCamelCase : List[Any] = solution[i]
break
_lowerCamelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = best_solution[:-1]
_lowerCamelCase : Optional[int] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCamelCase : List[str] = cost
_lowerCamelCase : Union[str, Any] = solution
else:
_lowerCamelCase : Optional[Any] = index_of_best_solution + 1
_lowerCamelCase : Union[str, Any] = neighborhood[index_of_best_solution]
if len(lowerCamelCase_ ) >= size:
tabu_list.pop(0 )
_lowerCamelCase : Optional[int] = count + 1
return best_solution_ever, best_cost
def _snake_case ( lowercase__=None ):
_lowerCamelCase : Union[str, Any] = generate_neighbours(args.File )
_lowerCamelCase : List[str] = generate_first_solution(
args.File , lowerCamelCase_ )
_lowerCamelCase : List[Any] = tabu_search(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args()) | 355 |
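# Usage sketch (added; file name is a placeholder): given a data file whose lines
# look like "a b 20" (an edge of weight 20 between nodes a and b), run
#   python tabu_search.py -f tabu_data.txt -i 100 -s 5
# Caveat: generate_first_solution reads only f.read(1) for the start node, so the
# greedy seed tour assumes single-character node labels.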
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 0 |
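# Example invocation (added; paths are placeholders, not from the source):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --dump_path ./sd15-diffusers --half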
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
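# Note (added): with this lazy-module pattern, `import transformers` stays cheap;
# the torch-dependent modeling code is only imported when a name listed in
# _import_structure (e.g. ReformerModel) is first accessed, because _LazyModule
# replaces this module in sys.modules with a proxy that resolves names on demand.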
| 63 | """simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor, row-wise."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
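# Derivation note (added): with p_i = exp(x_i) / A, the softmax entropy is
# -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log A) = log(A) - B / A,
# which is exactly the quantity returned above.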
class DeeBertEncoder(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class DeeBertModel(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
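# Usage sketch (added): defaults mirror BERT-base, plus the shape (glyph) and
# pronunciation (pinyin) embedding tables RoCBert uses for robustness pretraining:
#   config = RoCBertConfig(num_hidden_layers=6)
#   # model = RoCBertModel(config)  # companion model class in the same library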
| 155 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because new_feature_extractor comes from the
        # CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 155 | 1 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
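# Worked examples (added):
#   energy_conversion("kilowatthour", "joule", 1)  -> 3600000.0
#   energy_conversion("joule", "kilojoule", 1)     -> 0.001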
| 59 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
def __init__(self : Dict , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Any=32 , snake_case__ : Optional[Any]=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : List[Any]=16 , snake_case__ : int=[1, 2, 1] , snake_case__ : Dict=[2, 2, 4] , snake_case__ : Dict=2 , snake_case__ : Tuple=2.0 , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.02 , snake_case__ : int=1e-5 , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=True , snake_case__ : Optional[Any]=10 , snake_case__ : Optional[Any]=8 , snake_case__ : Any=["stage1", "stage2", "stage3"] , snake_case__ : Tuple=[1, 2, 3] , ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = parent
snake_case : Optional[int] = batch_size
snake_case : Union[str, Any] = image_size
snake_case : Dict = patch_size
snake_case : Optional[Any] = num_channels
snake_case : Union[str, Any] = embed_dim
snake_case : int = depths
snake_case : List[str] = num_heads
snake_case : Union[str, Any] = window_size
snake_case : Union[str, Any] = mlp_ratio
snake_case : List[Any] = qkv_bias
snake_case : List[Any] = hidden_dropout_prob
snake_case : Union[str, Any] = attention_probs_dropout_prob
snake_case : Union[str, Any] = drop_path_rate
snake_case : int = hidden_act
snake_case : Optional[int] = use_absolute_embeddings
snake_case : int = patch_norm
snake_case : Union[str, Any] = layer_norm_eps
snake_case : Any = initializer_range
snake_case : Optional[Any] = is_training
snake_case : Tuple = scope
snake_case : Optional[int] = use_labels
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Union[str, Any] = encoder_stride
snake_case : Any = out_features
snake_case : Tuple = out_indices
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : int = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Dict = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = MaskFormerSwinModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : List[Any] = model(snake_case__ )
snake_case : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case : Optional[int] = MaskFormerSwinBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : List[Any] = model(snake_case__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(snake_case__ ):
snake_case : Tuple = ["stem"]
snake_case : List[Any] = MaskFormerSwinBackbone(config=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case : List[Any] = config_and_inputs
snake_case : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
A__ : Optional[Any] = False
A__ : List[Any] = False
A__ : List[str] = False
A__ : List[str] = False
A__ : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case : str = MaskFormerSwinModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=snake_case__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
'''simple docstring'''
return
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case__ )
@unittest.skip("Swin does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Dict:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case , snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[Any] = [*signature.parameters.keys()]
snake_case : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Any:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
snake_case : Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
snake_case : int = outputs.hidden_states
snake_case : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# Swin has a different seq_length
snake_case : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case : int = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Dict = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = 3
snake_case : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case : str = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Optional[Any] = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Any ) -> Any:
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(snake_case__ : Union[str, Any] ):
snake_case : Any = 0
return t
def check_equivalence(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[int]={} ):
with torch.no_grad():
snake_case : Optional[Any] = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ )
snake_case : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple()
def recursive_check(snake_case__ : List[str] , snake_case__ : Optional[Any] ):
if isinstance(snake_case__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ):
recursive_check(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case__ , snake_case__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. Dict has"""
f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}."""
) , )
recursive_check(snake_case__ , snake_case__ )
for model_class in self.all_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ )
snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
snake_case : Tuple = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
snake_case : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
snake_case : Dict = self._prepare_for_class(snake_case__ , snake_case__ )
snake_case : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} )
snake_case : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
snake_case : List[str] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = MaskFormerSwinModelTester(self )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
snake_case : Optional[int] = backbone_class(snake_case__ )
backbone.to(snake_case__ )
backbone.eval()
snake_case : Union[str, Any] = backbone(**snake_case__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , snake_case__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
snake_case : Optional[int] = backbone(**snake_case__ , output_hidden_states=snake_case__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
snake_case , snake_case , snake_case : Dict = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
snake_case : Optional[Any] = backbone(**snake_case__ , output_attentions=snake_case__ )
self.assertIsNotNone(outputs.attentions )
| 59 | 1 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
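# Friedmann relation used above (added note):
#   H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda)
# with Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda); the flat-LCDM demo above has
# Omega_k ~ 0, so hubble_parameter(..., redshift=0) returns H0 = 68.3.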
| 347 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x + y + z as a reduced fraction (numerator, denominator)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
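# Note (added): the four branches above enumerate rational solutions of
# x^n + y^n = z^n for n in {1, 2, -1, -2} with 0 < x, y, z < 1 and denominators up
# to `order` (this matches Project Euler problem 180); the answer returned is
# numerator + denominator of the sum of all distinct x + y + z values.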
| 347 | 1 |
"""simple docstring"""
def solution():
    """Product of selected digits of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
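# Note (added): this is the digit product d(1) * d(10) * ... * d(1000000) of
# Project Euler 40; appending the first 1e6 integers yields comfortably more
# than 1e6 digits before the string is indexed.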
| 78 |
def solution(power: int = 1000) -> int:
    """Sum of the digits of 2**power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
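# Worked example (added): solution(15) == 26, since 2**15 = 32768 and 3+2+7+6+8 = 26.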
| 299 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 363 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self : Dict ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case__ : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
snake_case__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case__ : Union[str, Any] = CLIPTextModel(snake_case_ )
snake_case__ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case__ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[str] = self.get_dummy_components()
for name, module in components.items():
if hasattr(snake_case_ , """half""" ):
snake_case__ : Optional[int] = module.half()
snake_case__ : List[Any] = CycleDiffusionPipeline(**snake_case_ )
snake_case__ : Union[str, Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : int = self.get_dummy_inputs(snake_case_ )
snake_case__ : List[Any] = pipe(**snake_case_ )
snake_case__ : Any = output.images
snake_case__ : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 43 | 0 |
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"""4 for group projects, there are {combinations(40, 4)} ways""",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"""are {combinations(10, 3)} ways that first, second and""",
'''third place can be awarded.''',
)
| 72 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1_024, num_of_sequences=1_024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
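
# A minimal, self-contained sketch of how ConstantLengthDataset packs raw text into
# fixed-length token blocks. The toy in-memory "dataset" and the "gpt2" checkpoint
# below are illustrative assumptions, not part of the evaluation script above.
def _demo_constant_length_packing():
    toy_tokenizer = AutoTokenizer.from_pretrained("gpt2")  # any tokenizer with a bos token works here
    toy_examples = [{"content": "def add(a, b):\n    return a + b\n"} for _ in range(8)]
    toy_ds = ConstantLengthDataset(toy_tokenizer, toy_examples, seq_length=32, num_of_sequences=1)
    for block in toy_ds:
        print(block.shape)  # every yielded block has exactly seq_length tokens
        break
 | 308 | 0 |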
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 2_5_6,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
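

# A quick illustration of the pair layout produced by build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences above: [CLS] A [SEP] B [SEP], with token
# type ids 0 over the first segment and 1 over the second. The ids below are toy
# values chosen for the example, not real vocabulary ids.
def _demo_pair_layout():
    cls_id, sep_id = 101, 102  # illustrative placeholder ids
    token_ids_0, token_ids_1 = [7, 8, 9], [4, 5]
    input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
    token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
    assert len(input_ids) == len(token_type_ids) == 8
    return input_ids, token_type_ids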
| 76 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
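

# A tiny in-memory check of the dictionary rebuild in decompress_data, avoiding any
# file I/O. The input bit-string below is an arbitrary toy value, so we only print
# the output rather than asserting a specific result.
def _demo_decompress():
    sample_bits = "101100"  # illustrative payload, not a real LZW stream
    print(decompress_data(sample_bits))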
| 76 | 1 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        }, )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
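

# A minimal sketch of wiring the dataset into a PyTorch DataLoader. The tokenizer
# checkpoint and data_dir below are illustrative assumptions; the data_dir must
# contain the SQuAD train/dev .json files for this to actually run.
def _demo_build_squad_loader():
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data", max_seq_length=128)
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = SquadDataset(args, tokenizer, mode=Split.train)
    return DataLoader(dataset, batch_size=8, shuffle=True)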
| 215 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        1_00,
        Node(77_34_51_12),
        "dlrow olleH",
        7,
        55_55,
        0,
        -1_92.5_55_55,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 165 | 0 |
"""simple docstring"""
def mean_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
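
    # A quick worked example: for [1, 2, 3, 4] the mean is 2.5, the absolute
    # deviations are [1.5, 0.5, 0.5, 1.5], and their mean is 1.0.
    assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0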
| 32 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 6_0))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
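
    # Sanity check of the rotation helper: rotating the unit x-vector by 90 degrees
    # should give (approximately) the unit y-vector.
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))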
| 32 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
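

# A minimal sketch of inspecting the ONNX input axes for the default task. Creating
# the config is purely local; no model weights are downloaded. The __main__ guard is
# an illustrative addition, not part of the original module.
if __name__ == "__main__":
    config = RobertaPreLayerNormConfig()
    onnx_config = RobertaPreLayerNormOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes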
| 91 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
    try:
        n = int(sys.argv[1])
        print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 91 | 1 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
lowerCamelCase__ = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
lowerCamelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 310 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
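
    # Worked examples: [2, 2, 3] has a single mode, [2]; in [1, 1, 2, 2] both
    # values appear twice, so both are returned, sorted.
    assert mode([2, 2, 3]) == [2]
    assert mode([1, 1, 2, 2]) == [1, 2]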
| 310 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 89 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'tfds\.core', R'datasets'),
(R'tf\.io\.gfile\.GFile', R'open'),
(R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'),
(R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'),
(R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'),
(R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('),
(R'tfds\.features\.FeaturesDict\(', R'dict('),
(R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(R'tfds\.', R'datasets.'),
(R'dl_manager\.manual_dir', R'self.config.data_dir'),
(R'self\.builder_config', R'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 110 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
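# Minimal sanity checks for the two routines above (illustrative values only;
# `merge` sorts the slice [low, high] in place given two sorted halves):
assert merge([1, 3, 2, 4], 0, 2, 3) == [1, 2, 3, 4]
assert iter_merge_sort([9, 5, 1, 7]) == [1, 5, 7, 9]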
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 206 | import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
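# Illustrative example of the normalization above (hypothetical input):
#   normalize_answer("The Cat's!") -> "cats"
# i.e. lowercase -> strip punctuation -> drop articles -> collapse whitespace.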
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
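# Worked example (hypothetical strings): gold "brown fox" vs. prediction
# "the brown fox jumps" -> 2 shared tokens after normalization, so
# precision = 2/3, recall = 2/2 and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.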
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[F"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score")
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score")
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)")
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(F"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, F"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
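# Small worked example (hypothetical inputs): with one unanswerable question
# (baseline score 1) and one answerable question scoring 1 at na_prob 0.2,
# the running score peaks at 2 after the answerable question, so the function
# returns (100.0 * 2 / 2, 0.2).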
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 206 | 1 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """simple docstring"""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
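# Illustrative checks (assumed behavior of the three variants above):
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("hello world")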
def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 94 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 12 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
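# Illustrative usage (hypothetical task): for any task other than
# "multiple-choice", the `inputs` property above resolves to
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...])
# so all three model inputs share the same dynamic batch/sequence axes.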
| 361 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        """simple docstring"""
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital") | 239 | 0 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    '''simple docstring'''
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))


def get_device_map(n_layers: int, devices: list) -> dict:
    '''simple docstring'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
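# Illustrative example (hypothetical sizes): splitting 12 layers across 3 devices:
#   get_device_map(12, [0, 1, 2])
#   -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
# assert_device_map(get_device_map(12, [0, 1, 2]), 12) then passes silently.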
| 155 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    '''simple docstring'''
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
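# Worked example (hypothetical system): x + 2y = 5 and 3x + 4y = 11 give
# determinant = 1 * 4 - 3 * 2 = -2, determinant_x = 5 * 4 - 11 * 2 = -2 and
# determinant_y = 1 * 11 - 3 * 5 = -4, so
# cramers_rule_2x2((1, 2, 5), (3, 4, 11)) returns (1.0, 2.0).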
| 155 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model | 369 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
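# Illustrative check (external reference, not verified in-line): Project Euler
# problem 97 states that the last ten digits of 28433 * 2**7830457 + 1, i.e.
# solution(10), are "8739992577".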
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }") | 12 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-large-1500h-cv': (
        'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
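# Illustrative usage (assuming the reconstructed defaults above): the default
# conv strides are (5, 2, 2, 2, 2, 2, 2), so
#   UniSpeechConfig().inputs_to_logits_ratio == 5 * 2**6 == 320
# i.e. the feature encoder downsamples raw audio by a factor of 320.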
| 56 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 56 | 1 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """simple docstring"""

    def __init__(self, rows):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ])
            + "]"
        )

    def add_row(self, row, position=None):
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
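    # Minimal usage sketch (added for illustration; values are arbitrary). Note
    # that scalar multiplication truncates entries to int, so inverse() is exact
    # only when the true inverse happens to be integral.
    matrix = Matrix([[1, 2], [3, 4]])
    print(matrix.determinant())  # -2
    print((matrix**2).rows)  # [[7, 10], [15, 22]]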
| 9 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
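# Minimal standalone sketch of what these tests exercise (added for illustration;
# it downloads "diffusers/consistency_models" from the Hub, so network access and
# pretrained weights are assumed):
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=None, timesteps=[22, 0], output_type="np").images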
| 9 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 78 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 78 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Flip deprecated "no_*" flags into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
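# Usage sketch (added for illustration, not part of the original module):
# deprecated "no_*" keyword arguments are flipped into their positive
# counterparts by __init__, so both spellings configure the same attribute.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   assert args.cuda is False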
| 351 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
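# Usage sketch (added for illustration, not part of the original module; it
# downloads the tokenizer files from the Hub):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)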
| 214 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (the 10001st by default)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'{solution() = }')
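    # Sanity checks for the 6k +/- 1 primality test above (added for illustration):
    assert is_prime(2) and is_prime(97) and not is_prime(1) and not is_prime(91)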
| 273 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 273 | 1 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase = 10**9 ) -> int:
'''simple docstring'''
lowercase : Optional[int] = 1
lowercase : Optional[Any] = 2
lowercase : str = 0
lowercase : int = 0
lowercase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowercase : str = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
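    # The first perimeters produced are 16, 50, 196, ..., so a small bound is
    # easy to verify by hand (added for illustration):
    assert solution(100) == 66  # 16 + 50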
| 53 |
"""simple docstring"""
_UpperCamelCase: Dict = 2_5_6
# Modulus to hash a string
_UpperCamelCase: Union[str, Any] = 1_0_0_0_0_0_3
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowercase : Dict = len(_UpperCAmelCase )
lowercase : Union[str, Any] = len(_UpperCAmelCase )
if p_len > t_len:
return False
lowercase : Union[str, Any] = 0
lowercase : Dict = 0
lowercase : Any = 1
# Calculating the hash of pattern and substring of text
for i in range(_UpperCAmelCase ):
lowercase : Dict = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowercase : Tuple = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowercase : Tuple = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowercase : str = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowercase__ ( ) -> None:
'''simple docstring'''
lowercase : Any = 'abc1abc12'
lowercase : int = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowercase : Optional[int] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase ) and not rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 2)
lowercase : str = 'ABABX'
lowercase : Tuple = 'ABABZABABYABABX'
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 3)
lowercase : int = 'AAAB'
lowercase : Union[str, Any] = 'ABAAAAAB'
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 4)
lowercase : Union[str, Any] = 'abcdabcy'
lowercase : List[str] = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
# Test 5)
lowercase : Dict = 'Lü'
lowercase : Dict = 'Lüsai'
assert rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
lowercase : List[Any] = 'Lue'
assert not rabin_karp(_UpperCAmelCase , _UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
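    # Worked illustration of the O(1) rolling-hash window shift used above:
    # dropping the leading character and appending the next one gives
    #   new_hash = ((old_hash - ord(old_char) * modulus_power) * alphabet_size
    #               + ord(new_char)) % modulus
    # so the whole text is scanned in O(len(text)) expected time.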
| 53 | 1 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches whose partial sum already exceeds max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
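# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the matching subsets are
# [3, 4, 2] and [4, 5], so the line above prints: [3, 4, 2] [4, 5]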
| 32 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
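    # Typical invocation (added for illustration; flag names follow
    # TensorFlowBenchmarkArguments, and the script filename is assumed):
    #   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128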
| 32 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 334 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 69 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
| 311 | 0 |
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its content as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Once the lexicon size reaches a power of two, every code word grows by
        # one leading bit, so all existing keys are re-prefixed with "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, byte by byte, dropping the padding marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that the compressed file carries in front of the data."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the compressed source file, decompress it, and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
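# Usage sketch (assumes the input was produced by the matching LZW compressor):
#     python <this script> compressed.bin restored.txt
# reads the first argument, strips its size prefix, and writes the decompressed
# bytes to the second argument.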
| 362 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23 | 0 |
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
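        # Worked example with the defaults above: image_size=30, patch_size=2 gives
        # (30 // 2) ** 2 = 225 patches, so the expected sequence length is 225 + 1 = 226.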
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # We need to override this test because ViT's forward signature differs from text models.
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because ViT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 9 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
_a = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
_a = '▁'
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""input_ids""", """token_type_ids"""]
SCREAMING_SNAKE_CASE__ : Tuple = FNetTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_="<unk>" , lowercase_="[SEP]" , lowercase_="<pad>" , lowercase_="[CLS]" , lowercase_="[MASK]" , **lowercase_ , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : int = (
AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
if isinstance(lowercase_ , lowercase_ )
else mask_token
)
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Any = do_lower_case
UpperCAmelCase_ : Tuple = remove_space
UpperCAmelCase_ : str = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : List[Any] = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
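    # Illustration (hypothetical ids): for a pair A = [5, 6] and B = [7], the two
    # methods above line up as
    #     tokens:   [CLS] 5 6 [SEP] 7 [SEP]
    #     type ids:   0   0 0   0   1   1
    # i.e. everything up to and including the first [SEP] is segment 0, the rest segment 1.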
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 61 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 357 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
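# Illustration (hypothetical tensors): check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
# returns True, while mixing in a (2, 3) tensor would make it return False.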
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image_upscaler(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 201 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
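# Example of the resulting key mapping (derived from the rules above):
#     "emb.weight"              -> "rwkv.embeddings.weight"
#     "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#     "blocks.3.ffn.time_mix_r" -> "rwkv.blocks.3.feed_forward.time_mix_receptance"
#     "head.weight"             -> "head.weight"  (left unprefixed)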
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 236 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase : int = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
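    # With the lazy module installed above, `from transformers.models.swin import SwinModel`
    # resolves SwinModel from `_import_structure` on first attribute access instead of
    # importing the torch-backed implementation eagerly.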
| 236 | 1 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 308 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    original_args = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)

    original = AbsSummarizer(original_args, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(original_args, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info('Make sure that the models\' outputs are identical')
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the models' outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generators' outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
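# Example invocation (script filename and paths are placeholders):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-converted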
| 308 | 1 |
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
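# A short usage sketch (assumes the IIRFilter class from audio_filters.iir_filter
# exposes a per-sample `process` method, as in the reference implementation):
#
#   lowpass = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate
#   filtered = [lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.25)]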
| 42 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Return the maximum value in nums between indices left and right (inclusive),
    using divide and conquer.

    >>> find_max([1, 9, 4, 7], 0, 3)
    9
    >>> find_max([3], 0, 0)
    3
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
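# Worked example: find_max([1, 9, 4, 7], 0, 3) splits at mid = 1 into the halves
# [0, 1] and [2, 3]; those return 9 and 7 respectively, so the call returns 9.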
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 12 | 0 |
"""simple docstring"""
from manim import *
class CheckpointOffloadScene(Scene):  # illustrative class name; a manim animation must subclass Scene
    def construct(self):
__snake_case : int = Rectangle(height=0.5 , width=0.5 )
__snake_case : int = Rectangle(height=0.25 , width=0.25 )
__snake_case : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__snake_case : Optional[int] = [mem.copy() for i in range(6 )]
__snake_case : str = [mem.copy() for i in range(6 )]
__snake_case : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Union[str, Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
__snake_case : Tuple = Text('''CPU''' , font_size=24 )
__snake_case : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
__snake_case : int = [mem.copy() for i in range(4 )]
__snake_case : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : List[Any] = Text('''GPU''' , font_size=24 )
__snake_case : Any = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
__snake_case : List[str] = [mem.copy() for i in range(6 )]
__snake_case : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Dict = Text('''Model''' , font_size=24 )
__snake_case : int = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
__snake_case : List[str] = []
__snake_case : str = []
__snake_case : str = []
for i, rect in enumerate(a_ ):
rect.set_stroke(a_ )
__snake_case : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 )
self.add(a_ )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ , *a_ )
__snake_case : str = [mem.copy() for i in range(6 )]
__snake_case : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Optional[Any] = Text('''Loaded Checkpoint''' , font_size=24 )
__snake_case : Optional[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a_ )
__snake_case : Tuple = []
__snake_case : Optional[int] = []
for i, rect in enumerate(a_ ):
__snake_case : int = fill.copy().set_fill(a_ , opacity=0.7 )
target.move_to(a_ )
ckpt_arr.append(a_ )
__snake_case : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
__snake_case : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case : Optional[int] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
__snake_case : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
__snake_case : Tuple = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
__snake_case : Dict = [meta_mem.copy() for i in range(6 )]
__snake_case : Optional[int] = [meta_mem.copy() for i in range(6 )]
__snake_case : Any = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Optional[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
__snake_case : Optional[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
__snake_case : List[Any] = Text('''Disk''' , font_size=24 )
__snake_case : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) )
__snake_case : int = []
for i, rect in enumerate(a_ ):
__snake_case : Optional[Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a_ , run_time=1.5 ) )
self.play(*a_ )
self.play(FadeOut(a_ ) )
__snake_case : List[str] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
self.play(
FadeOut(a_ , a_ , *a_ , *a_ ) , )
self.wait()
| 368 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
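# Behaviour sketch: when any of the listed backends is missing, the dummy object
# raises an informative ImportError at use time rather than at import time, e.g.
#
#   SpectrogramDiffusionPipeline()                      # ImportError naming the backends
#   SpectrogramDiffusionPipeline.from_pretrained("x")   # same, via the classmethod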
| 24 | 0 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
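# Quick demonstration of the API above:
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()    # -2
#   m.inverse()        # adjugate scaled by 1 / determinant (entries int()-truncated)
#   (m + m) == m * 2   # True -- scalar __mul__ and __add__ agree on this example
#   (m * m).rows       # matrix product computed via Matrix.dot_product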
| 9 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
original_model.eval()
print('''Done!''' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
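# Example invocation sketch (script filename and dump folder are placeholders):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-hf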
| 9 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
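# A minimal usage sketch of the mark/register pattern defined above
# (the class and key below are illustrative):
#
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(self):
#           ...
#
#   Menu.handle_input()  # reads one character and dispatches to the marked handler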
| 359 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 105 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the training data."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the validation data."""} )
UpperCamelCase_ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : str = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : str = field(default=UpperCAmelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
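# Typical invocation sketch (dataset name and hyper-parameters are illustrative;
# the telemetry call above records this example as "run_image_classification"):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --do_train --do_eval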
| 319 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
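# Minimal usage sketch: instantiate the config with defaults and inspect the
# ConvBERT-specific fields layered on top of the standard BERT hyper-parameters.
#
#   config = ConvBertConfig()
#   (config.head_ratio, config.conv_kernel_size, config.num_groups)  # (2, 9, 1)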
| 319 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Any ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ )
lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Any = TextStreamer(UpperCAmelCase_ )
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)

        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)

        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):  # `Empty` is queue.Empty, imported at the top of the original test module
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
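# --- Hedged usage sketch (not part of the test suite above) ---
# The tests drive `TextIteratorStreamer` from a background thread; the same pattern is how
# application code typically consumes streamed tokens. The model name below is an
# illustrative assumption, not taken from the tests.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tokenizer("Streaming lets you", return_tensors="pt")
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
#     thread.start()
#     for chunk in streamer:  # yields decoded text pieces as they are generated
#         print(chunk, end="", flush=True)
#     thread.join()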
| 323 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
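# --- Hedged sketch (not part of the module above) ---
# What the `_LazyModule` pattern buys: submodules are only imported on first attribute
# access, so `import transformers` stays cheap even with many declared backends. The class
# below is a simplified illustration of the idea, not the exact transformers implementation.
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, item):
#             module = importlib.import_module("." + self._class_to_module[item], self.__name__)
#             value = getattr(module, item)
#             setattr(self, item, value)  # cache so later lookups skip __getattr__
#             return value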
| 323 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_plbart"""] = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_plbart"""] = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 201 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_DESCRIPTION = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_KWARGS_DESCRIPTION = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
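# --- Hedged usage sketch (not part of the metric file) ---
# With `use_aggregator=False`, `_compute` above returns one `Score` tuple per example
# instead of bootstrap aggregates, which is handy for error analysis. Strings below are
# illustrative.
#
#     rouge = datasets.load_metric("rouge")
#     per_sample = rouge.compute(
#         predictions=["hello there", "general kenobi"],
#         references=["hello there", "general kenobi"],
#         use_aggregator=False,
#     )
#     # per_sample["rouge1"] is a list of Score(precision, recall, fmeasure), one per pair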
| 55 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
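# --- Hedged worked example (values are illustrative, typical of a silicon pn junction at 300 K) ---
# With donor_conc = 1e17 cm^-3, acceptor_conc = 1e17 cm^-3 and intrinsic_conc = 1.5e10 cm^-3:
# V_bi = (kT/q) * ln(Nd * Na / ni^2) ~= 0.0259 * ln(4.44e13) ~= 0.81 V.
#
#     print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))
#     # ~= 0.81 (volts)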
| 106 | '''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
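# --- Hedged usage sketch (not part of this module) ---
# Any of the auto classes above resolves a config type to its Flax implementation at load
# time. The checkpoint name is illustrative.
#
#     from transformers import FlaxAutoModelForMaskedLM
#
#     model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")
#     # `model` is a FlaxBertForMaskedLM, selected via FLAX_MODEL_FOR_MASKED_LM_MAPPING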
| 106 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)

    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
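# --- Hedged usage sketch ---
# A typical invocation of this conversion script; all paths and checkpoint names below are
# illustrative assumptions, not taken from the file above:
#
#     python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./sew-tiny-100k/checkpoint_best.pt \
#         --pytorch_dump_folder_path ./sew-tiny-100k-hf \
#         --dict_path ./sew-tiny-100k/dict.ltr.txt \
#         --is_finetuned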
| 42 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
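# --- Hedged usage sketch (not part of the config file) ---
# Constructing the config directly, overriding two Switch-Transformer-style routing fields:
#
#     config = GPTSanJapaneseConfig(num_experts=8, expert_capacity=64)
#     assert config.num_layers == config.num_switch_layers + config.num_ext_layers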
| 348 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
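# --- Hedged worked example (bypassing the interactive input above) ---
# A 3-vertex triangle with weights 1, 2 and 3 yields the two cheapest edges as the MST:
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     print(prisms_algorithm(example))  # expected: [(0, 1), (1, 2)]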
| 348 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
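# --- Hedged note with a quick check (not in the original file) ---
# Each iteration replaces every edge with 4 edges, so the point count grows as
# n -> 4 * (n - 1) + 1. Starting from the 4 points of the triangle (3 edges),
# 5 iterations give 3 * 4**5 + 1 = 3073 points.
#
#     assert len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1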
| 23 | 0 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure that `FeaturesManager.determine_framework` picks the right framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
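# --- Hedged usage sketch (not part of the test file) ---
# How the API under test is typically called; the model name is illustrative.
#
#     from transformers.onnx import FeaturesManager
#
#     framework = FeaturesManager.determine_framework("bert-base-uncased")
#     # returns "pt" when torch is installed, falling back to "tf"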
| 136 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]
        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        """simple docstring"""
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        """simple docstring"""
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """simple docstring"""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        """simple docstring"""
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
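# Note: instruct-pix2pix conditions on both a text prompt and an input image.
# `guidance_scale` steers the sample toward the prompt while `image_guidance_scale`
# (>= 1) steers it toward the input image, which is why the inputs built above carry
# both knobs in addition to the usual prompt/generator/num_inference_steps fields.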
| 136 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_lowercase : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_lowercase : Optional[int] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_lowercase : str = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        """simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
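# Usage sketch (assumes the `datasets` library resolves "wer" to this metric script):
#
#   wer = datasets.load_metric("wer")
#   score = wer.compute(predictions=["this is the prediction"],
#                       references=["this is the reference"])
#
# With concatenate_texts=True a single alignment over all texts is computed by jiwer
# instead of summing per-pair substitution/deletion/insertion counts as above.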
| 93 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs=None, additional_special_tokens=None, **kwargs, ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        """simple docstring"""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
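# Usage sketch, using a checkpoint name taken from the vocab map above:
#
#   tokenizer = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria",
#                     return_tensors="pt")
#
# Source text is suffixed with [eos, src_lang_code] and target text with
# [eos, tgt_lang_code] -- exactly what set_src_lang_special_tokens /
# set_tgt_lang_special_tokens configure via prefix_tokens/suffix_tokens.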
| 93 | 1 |
import argparse
import os
import re
_UpperCAmelCase = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_UpperCAmelCase = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_UpperCAmelCase = re.compile(r'\s*\(\s*\"(\S[^\"]+)\"')
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = False ) -> int:
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
UpperCamelCase_ = f.read()
UpperCamelCase_ = content.split("\n" )
UpperCamelCase_ = []
UpperCamelCase_ = 0
while line_idx < len(UpperCamelCase_ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCamelCase_ = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCamelCase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCamelCase_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCamelCase_ = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : _re_identifier.search(UpperCamelCase_ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write("\n".join(UpperCamelCase_ ) )
elif "\n".join(UpperCamelCase_ ) != content:
return True
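# Example: given a mapping such as
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
#
# sort_auto_mapping reorders the entries alphabetically by the quoted identifier
# ("albert" before "bert"); in check-only mode it returns True when a rewrite is needed.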
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'''
            " this.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 364 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(self, *,
        clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim, ):
        """simple docstring"""
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
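# Shape sketch (illustrative sizes, not taken from a checkpoint): with
# clip_embeddings_dim=768, cross_attention_dim=1280 and clip_extra_context_tokens=4,
# image_embeddings of shape [B, 768] yield clip_extra_context_tokens of shape
# [B, 4, 1280], which is prepended to the projected text encoder hidden states along
# the sequence dimension, while additive_clip_time_embeddings ([B, time_embed_dim])
# is added to the decoder's timestep embedding.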
| 328 | 0 |
'''simple docstring'''
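# NOTE: the schedule names below are reconstructed from the list lengths
# (27/27/50/100/185/27/40/100 entries); they match the hand-tuned "fast"/"smart"/
# "super" denoising timestep schedules used by Karlo-style unCLIP decoder and
# super-resolution models, but the original variable names are not recoverable from
# this excerpt and should be treated as an assumption.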
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 304 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ], axis=1, )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=2_0):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
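# Usage sketch: create_pan_cameras(64) builds 20 poses orbiting the origin at radius 4
# (cameras look inward with a slight downward tilt, since z carries a -0.5 vertical
# component before normalization). camera.camera_rays then has shape
# [1, 20 * 64 * 64, 2, 3]: an (origin, direction) pair per pixel per view, ready for
# ray marching / volume rendering.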
| 304 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50_257, max_position_embeddings=2_048, hidden_size=2_048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
                F"""`config.num_layers = {self.num_layers}`. """
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
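    # Example: the default attention_types value [[["global", "local"], 12]] expands to
    # ["global", "local", "global", "local", ...] with 24 entries, i.e. alternating
    # global and local attention across the model's 24 layers.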
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
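# Worked example: seq_length=12, window_size=7 gives candidates 1..6; the divisors of
# 12 in that range are {1, 2, 3, 4, 6}, so the function returns (6, 2): a block length
# of 6 and 2 blocks. Keeping everything in torch ops (no Python ints) is what makes
# the computation traceable for ONNX export.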
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
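# Example: for a 4-day term there are 43 valid prize strings (the counts grow as
# 3, 8, 19, 43, ... with the term length), so solution(4) == 43. Caching each
# (days, absent, late) state bounds the work by days * 2 * 3 distinct states despite
# the three-way branching of the recursion.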
if __name__ == "__main__":
print(solution()) | 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
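# With the lazy module installed in sys.modules, `from transformers.models.vit_msn
# import ViTMSNModel` only triggers the torch-dependent import on first attribute
# access, so a bare `import transformers` stays cheap when torch is not installed.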
| 36 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """simple docstring"""
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        """simple docstring"""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        """simple docstring"""
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 24 | 0 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Sorts a list using the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
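# Examples (doctest-style):
#
#   >>> merge_sort([0, 5, 3, 2, 2])
#   [0, 2, 2, 3, 5]
#   >>> merge_sort([])
#   []
#   >>> merge_sort([-2, -5, -45])
#   [-45, -5, -2]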
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 369 | '''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
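# For bloom-style checkpoints such as bigscience/bloom-1b7 this picks the MLP
# down-projection (dense_4h_to_h) of the first block; for gpt2 it picks c_fc. The
# tests below only need *some* linear layer whose weight class reveals how the model
# was quantized.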
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
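# Sketch of how such an adapter is typically attached (hypothetical loop, mirroring
# what quantized-training tests do): iterate over model.modules() and replace selected
# nn.Linear submodules with LoRALayer(module, rank=16), so that only the small adapter
# matrices receive gradients while the quantized base weights stay frozen.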
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
'''simple docstring'''
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"

    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")

    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
'''simple docstring'''
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
    def tearDown(self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fpaa.get_memory_footprint()
        mem_4bit = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        r"""Generate with the 4-bit model and check the output against the expected set."""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        r"""Same generation check, but loading through an explicit `BitsAndBytesConfig`."""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        r"""Saving 4-bit models is not supported yet, so `save_pretrained` must raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_4bit(self):
        r"""Passing both a quantization config and `load_in_4bit` must raise."""
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        r"""Casting or moving a 4-bit model must raise; the fp16 model must stay fully usable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
    def test_fp32_4bit_conversion(self):
        r"""`keep_in_fp32_modules` should leave the flagged T5 weights in float32."""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
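# Note (added): the dtype checks above depend on `lm_head` and everything listed in
# `_keep_in_fp32_modules` being excluded from 4-bit packing; compare `test_linear_are_4bit`.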
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        r"""Free GPU memory and cache between tests to avoid cross-test interference."""
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        r"""
        4-bit and fp32 weights can be mixed. `flan-t5-small` uses a dense-act MLP whereas
        `t5-small` uses dense-relu-dense, so both variants are exercised.
        """
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        r"""Same as above, but with `_keep_in_fp32_modules` left in place."""
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        r"""Heads must keep their native class; only the 4-bit linear weights change class."""
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Bnb4BitTestPipeline(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        r"""A 4-bit model can also be loaded through the `pipeline` API."""
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        r"""Load the model across 2 GPUs with a balanced device map and run inference."""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
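# Minimal usage sketch distilled from the tests above (illustrative; assumes a CUDA device
# and an installed `bitsandbytes`):
#   model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-1b7", load_in_4bit=True, device_map="auto")
#   print(model.get_memory_footprint())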
| 18 | 0 |
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort a list of non-negative integers in place with LSD radix sort.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
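    # Example run (illustrative, mirrors the doctest added above):
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))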
| 322 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
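# Note (added): with `do_sample=False` the generation above is greedy and therefore
# deterministic for a fixed checkpoint, which is what makes the exact token-id comparison safe.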
| 277 | 0 |
def naive_pattern_search(s, pattern):
    """Return every index in `s` where `pattern` starts (naive O(len(s) * len(pattern)) scan)."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
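    # Extra illustrative checks (added, not in the original file):
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]
    assert naive_pattern_search("ABC", "D") == []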
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
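# Usage sketch (added, illustrative): with the lazy module above in place,
#   from transformers.models.deit import DeiTConfig   # cheap, no torch/TF import yet
#   from transformers.models.deit import DeiTModel    # triggers the torch import on first access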
| 19 | 0 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
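# Usage sketch (added, illustrative of how model test files wire this up):
#   self.config_tester = ConfigTester(self, config_class=SomeConfig, hidden_size=37)
#   self.config_tester.run_common_tests()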
| 284 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 185 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'mgp-str'
def __init__( self , __a=[32, 1_28] , __a=4 , __a=3 , __a=27 , __a=38 , __a=5_02_57 , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=4.0 , __a=True , __a=False , __a=1e-5 , __a=0.0 , __a=0.0 , __a=0.0 , __a=False , __a=0.02 , **__a , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = max_token_length
_UpperCamelCase = num_character_labels
_UpperCamelCase = num_bpe_labels
_UpperCamelCase = num_wordpiece_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = mlp_ratio
_UpperCamelCase = distilled
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = drop_rate
_UpperCamelCase = qkv_bias
_UpperCamelCase = attn_drop_rate
_UpperCamelCase = drop_path_rate
_UpperCamelCase = output_aa_attentions
_UpperCamelCase = initializer_range
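# Usage sketch (added, illustrative): MgpstrConfig() reproduces the defaults of the
# "alibaba-damo/mgp-str-base" checkpoint referenced in the archive map above.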
| 100 |
"""simple docstring"""
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_a = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
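# Usage sketch (added, illustrative):
#   manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#   extracted_dir = manager.extract("archive.tar.gz")  # returns the input path if not an archive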
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
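# Note (added): is_extractable() reads only as many leading bytes as the longest registered
# magic number, so format sniffing never has to load the archive itself.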
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Block tar members whose resolved path or link target would escape the extraction
        # directory (path-traversal hardening).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
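# Usage sketch (added, illustrative):
#   fmt = Extractor.infer_extractor_format("data.tar.gz")   # e.g. "gzip"
#   Extractor.extract("data.tar.gz", "out_dir", extractor_format=fmt)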
| 100 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`):
            List of denoised frames (essentially images) as NumPy arrays or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 189 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0,
        d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads,
                dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMSNorm: only scales, no mean subtraction; variance computed in fp32 for stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh-approximated GELU as used in the original BERT/GPT repositories."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: predicts a per-channel scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 189 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class _lowercase ( _lowercase ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
    def __init__( self: int , vocab_file: str , do_lower_case: bool=False , remove_space: bool=False , keep_accents: bool=False , pad_token: str=None , unk_token: str=None , eos_token: str=None , bos_token: str=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs: Optional[int] , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"""
                """ if you are testing the model, this can safely be ignored""" )
            name_or_path = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase__ : Any = """<|endoftext|>""" if eos_token is None else eos_token
lowerCamelCase__ : Any = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase__ : Optional[Any] = unk_token if pad_token is None else pad_token
lowerCamelCase__ : Any = eos_token if bos_token is None else bos_token
else:
lowerCamelCase__ : Any = """<pad>""" if pad_token is None else pad_token
lowerCamelCase__ : List[str] = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
# Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: the upstream set contains distinct Unicode space characters; they appear to have been normalized to plain spaces here.
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'''[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__( self: Tuple ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
def __setstate__( self: Tuple , UpperCamelCase__: Any ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self: List[Any] ):
return len(self.sp_model )
    def preprocess_text( self: str , text: str ):
        text = self.non_printing_characters_re.sub("""""" , text )
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""" , text )
return text
    def _tokenize( self: Union[str, Any] , text: str , **kwargs: Optional[Any] ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self: List[Any] , token: str ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self: Dict , index: int ):
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization( out_string: str ):
return out_string
    def convert_tokens_to_string( self: str , tokens: List[str] ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string
    def get_vocab( self: List[Any] ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def save_vocabulary( self: Optional[int] , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
    def encode_fast( self: Tuple , text: Union[str, List[str]] , return_tensors: Union[str, bool] = False ):
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
return token_ids
    def decode_fast( self: Dict , token_ids: Union[int, List[int]] ):
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self: List[Any] , conversation: "Conversation" ):
        all_responses = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(all_responses ) + F'''{self.bos_token}Bot:'''
        )
        return self.encode(text=prompt )
| 129 |
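# --- Hedged example (added for illustration) ---
# Standalone sketch of the chat prompt built by the conversation method above:
# "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:". The token strings are the
# defaults from the tokenizer above; everything else is an assumption.
def build_gpt_sw3_prompt(turns, eos="<|endoftext|>", bos="<s>"):
    texts = [f"User: {t}" if i % 2 == 0 else f"Bot: {t}" for i, t in enumerate(turns)]
    return f"{eos}{bos}" + f"{bos}".join(texts) + f"{bos}Bot:"

print(build_gpt_sw3_prompt(["Hej!", "Hej! Hur kan jag hjälpa dig?"]))
# <|endoftext|><s>User: Hej!<s>Bot: Hej! Hur kan jag hjälpa dig?<s>Bot: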
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results (test_results ) -> Tuple:
    expressions = test_results.split(""" """ )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure (failures_short_lines ) -> Tuple:
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("""\n""" ):
        if re.search(r"""_ \[doctest\]""" , line ):
            in_error = True
            file = line.split(""" """ )[2]
        elif in_error and not line.split(""" """ )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__( self: Tuple , title: str , doc_test_results: Dict ):
        self.title = title
        self._time_spent = doc_test_results["""time_spent"""].split(""",""" )[0]
        self.n_success = doc_test_results["""success"""]
        self.n_failures = doc_test_results["""failures"""]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time( self: Dict ):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(""":""" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return F'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
@property
    def header( self: Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self: Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def failures( self: Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures( self: Optional[Any] ):
        line_length = 40
        category_failures = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = """"""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload( self: Any ):
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out( ):
        payload = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(UpperCamelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase__ , )
    def post( self: Any ):
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
        text = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=text , )
    def get_reply_blocks( self: Any , job_name: str , job_link: Dict , failures: List[Any] , text: Union[str, Any] ):
        failures_text = """"""
        for key, value in failures.items():
            value = value[:200] + """ [Truncated]""" if len(value ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            content["""accessory"""] = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self: Tuple ):
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""" )
        job_link = self.doc_test_results.pop("""job_link""" )
        self.doc_test_results.pop("""failures""" )
        self.doc_test_results.pop("""success""" )
        self.doc_test_results.pop("""time_spent""" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""] ):
                text = F'''*Num failures* :{len(job_result['failed'] )} \n'''
                failures = job_result["""failures"""]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def get_job_links () -> Tuple:
    run_id = os.environ["""GITHUB_RUN_ID"""]
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' ).json()
            jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return jobs
    except Exception as e:
        print("""Unknown error, could not fetch links.""" , e )
        return {}
def retrieve_artifact (name ) -> Any:
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="""utf-8""" ) as f:
                    # key files by their stem ("stats", "failures_short", ...); reconstructed from usage below
                    _artifact[file.split(""".""" )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'''Could not open {os.path.join(name , file )}.''' ) from e
return _artifact
def retrieve_available_artifacts () -> Optional[Any]:
    class Artifact:
        def __init__( self: Tuple , name: str ):
            self.name = name
            self.paths = []
        def __str__( self: int ):
            return self.name
        def add_path( self: Tuple , path: str ):
            self.paths.append({"""name""": self.name, """path""": path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['''job_link'''] = github_actions_job_links.get('''run_doctests''')
    artifact_path = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    artifact = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['''stats'''])
        doc_test_results['''failures'''] = failed
        doc_test_results['''success'''] = success
        doc_test_results['''time_spent'''] = time_spent[1:-1] + ''', '''
    all_failures = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
            line = line.replace('''FAILED ''', '''''')
            line = line.split()[0].replace('''\n''', '''''')
            if "::" in line:
                file_path , test = line.split('''::''')
            else:
                file_path , test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                    category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
                    failure = all_failures[test] if test in all_failures else '''N/A'''
                    doc_test_results[category]['''failures'''][test] = failure
break
    message = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 129 | 1 |
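# --- Hedged example (added for illustration; standalone mirror of handle_test_results above) ---
# Parses a pytest summary such as "== 2 failed, 10 passed in 0:01:05 ==":
def parse_summary(test_results):
    expressions = test_results.split(" ")
    failed = success = 0
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent

print(parse_summary("== 2 failed, 10 passed in 0:01:05 =="))  # (2, 10, '0:01:05')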
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( TestCasePlus ):
@slow
@require_torch
    def test_finetune_bert2bert( self ):
'''simple docstring'''
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
        val_dataset = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            # mask pad positions so they are ignored by the cross-entropy loss
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='''steps''' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
| 330 |
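# --- Hedged example (added for illustration) ---
# The -100 label masking used in the test above: positions equal to the pad id
# are replaced by -100 so PyTorch's cross-entropy (and hence Seq2SeqTrainer)
# ignores them. The pad id 0 here is an assumption for the demo.
pad_token_id = 0
labels = [[101, 7, 0, 0], [101, 9, 8, 0]]
masked = [[-100 if t == pad_token_id else t for t in seq] for seq in labels]
print(masked)  # [[101, 7, -100, -100], [101, 9, 8, -100]]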
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_encoder_decoder"""] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_encoder_decoder"""] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_encoder_decoder"""] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 330 | 1 |
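# --- Hedged sketch (added for illustration) ---
# Minimal version of the lazy-import pattern implemented by _LazyModule above:
# submodules are imported only when one of their symbols is first accessed.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        for submodule, symbols in self._import_structure.items():
            if item in symbols:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")

# e.g. LazyModule("pkg", {"json": ["dumps"]}).dumps resolves json.dumps on first use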
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester( ConfigTester ):
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__magic_name__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__magic_name__ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__magic_name__ , """num_attention_heads""" ) )
class MobileViTModelTester:
    def __init__( self : List[Any] , parent : Union[str, Any] , batch_size : int=13 , image_size : List[str]=32 , patch_size : Optional[int]=2 , num_channels : List[str]=3 , last_hidden_size : Optional[int]=6_40 , num_attention_heads : Dict=4 , hidden_act : Tuple="silu" , conv_kernel_size : Optional[int]=3 , output_stride : Any=32 , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : Union[str, Any]=0.1 , classifier_dropout_prob : int=0.1 , initializer_range : Optional[int]=0.02 , is_training : List[Any]=True , use_labels : Any=True , num_labels : str=10 , scope : Tuple=None , ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def lowercase__ ( self : List[Any] , config : List[str] , pixel_values : List[Any] , labels : List[str] , pixel_labels : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def lowercase__ ( self : int , config : List[str] , pixel_values : Optional[Any] , labels : Optional[int] , pixel_labels : Any ) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowercase__ ( self : List[str] , config : Dict , pixel_values : Optional[Any] , labels : Union[str, Any] , pixel_labels : Optional[int] ) -> str:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def lowercase__ ( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    # attribute names reconstructed from the upstream test file; exact original order is uncertain
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    fx_compatible = False
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict : Dict , config : Dict , model_class : Optional[int] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def lowercase__ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Any:
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
        model = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 13 |
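# --- Hedged example (added for illustration) ---
# The hidden-state shape check above: MobileViT halves the feature-map size at
# each of its 5 stages, ending at image_size // output_stride.
image_size, output_stride = 32, 32
sizes, divisor = [], 2
for _ in range(5):
    sizes.append(image_size // divisor)
    divisor *= 2
print(sizes)                          # [16, 8, 4, 2, 1]
print(divisor // 2 == output_stride)  # True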
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main( ) -> None:
    """simple docstring"""
    message = input("""Enter message: """ )
    key = input("""Enter key [alphanumeric]: """ )
    mode = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        mode = """encrypt"""
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        mode = """decrypt"""
        translated = decrypt_message(key , message )
    print(F'''\n{mode.title()}ed message:''' )
    print(translated )
def encrypt_message( key , message ) -> str:
    """simple docstring"""
    return translate_message(key , message , """encrypt""" )
def decrypt_message( key , message ) -> str:
    """simple docstring"""
    return translate_message(key , message , """decrypt""" )
def translate_message( key , message , mode ) -> str:
    """simple docstring"""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
| 13 | 1 |
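# --- Hedged example (added for illustration) ---
# Round-trip check for the Vigenère routines above (key first, then message):
message = "Attack at dawn"
ciphertext = encrypt_message("LION", message)
assert decrypt_message("LION", ciphertext) == message
print(ciphertext)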
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""" , init=False , repr=False )
def __call__( self ):
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten( self ):
"""simple docstring"""
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )
    def __post_init__( self ):
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ):
"""simple docstring"""
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        """simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
return {"language": languages, "translation": translations}
    def flatten( self ):
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
| 61 |
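# --- Hedged example (added for illustration) ---
# How the encode_example method above flattens a translation dict with
# multiple references per language into sorted parallel lists:
translation_dict = {"en": "the cat", "fr": ["le chat", "la chatte"]}
tuples = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        tuples.append((lang, text))
    else:
        tuples.extend([(lang, el) for el in text])
languages, translations = zip(*sorted(tuples))
print(languages)     # ('en', 'fr', 'fr')
print(translations)  # ('the cat', 'la chatte', 'le chat')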
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename: Optional[int] , start_prompt: Optional[Any] , end_prompt: Tuple ) -> Tuple:
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide: List[str] ) -> Optional[int]:
    """simple docstring"""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide: Tuple , overwrite: List[str]=False ) -> List[str]:
    """simple docstring"""
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 0 |
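# --- Hedged example (added for illustration) ---
# The doc line generated by get_model_list_for_task above links each model
# name to its model_doc page:
model_names = {"bert": "BERT", "t5": "T5"}
line = ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
print(line)  # [BERT](../model_doc/bert), [T5](../model_doc/t5)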
from __future__ import annotations
def depth_first_search( possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution( n: int ) -> None:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('' )
    print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 45 |
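# --- Hedged example (added for illustration) ---
# The diagonal bookkeeping above: queens (r1, c1) and (r2, c2) attack each other
# diagonally exactly when r1 - c1 == r2 - c2 (45°) or r1 + c1 == r2 + c2 (135°).
def attacks(q1, q2):
    (r1, c1), (r2, c2) = q1, q2
    return c1 == c2 or r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2

print(attacks((0, 0), (2, 2)))  # True: same 45° diagonal
print(attacks((0, 1), (1, 3)))  # False: no shared column or diagonal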
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase ( TestCasePlus ):
    def _create_dummy_data( self : Optional[int] , data_dir : Optional[int] ) -> List[Any]:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
    def _run_finetune( self : Dict , gpus : int , distributed_retriever : str = "pytorch" ) -> str:
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , 'output' )
        data_dir = os.path.join(tmp_dir , 'data' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , 'metrics.json' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result
@require_torch_gpu
    def test_finetune_gpu( self : Optional[Any] ) -> Optional[int]:
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
    def test_finetune_multigpu( self : Any ) -> List[Any]:
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval( self : Optional[int] ) -> Optional[Any]:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval( self : Dict ) -> List[str]:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
        self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 45 | 1 |
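# --- Hedged example (added for illustration) ---
# The assertion pattern used by the tests above: load the metrics dump written
# by the finetuning run and check the exact-match score. The JSON payload here
# is fabricated for the demo.
import io
import json

metrics = json.load(io.StringIO('{"test": [{"test_avg_em": 0.5}]}'))
assert metrics["test"][0]["test_avg_em"] >= 0.2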
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key( state_dict: Dict , old: Dict , new: Union[str, Any] ) -> List[str]:
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
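# Note: the slices above assume the fused in_proj matrix stacks q, k and v along dim 0 and that the
# model's hidden size (d_model) is 256, which holds for these ResNet-18 Table Transformer checkpoints.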
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
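# The mean/std above are the standard ImageNet statistics commonly used by DETR-style preprocessing.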
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
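# Example invocation (script and output path names are illustrative):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection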
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
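# Each __iter__ pass stops after any item with probability p_stop, so the dataset's length is random
# (roughly geometric with mean 1 / p_stop, capped at max_length); the tests below re-seed `random`
# before each pass to get reproducible lengths.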
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 161 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized, untrained model with the architecture given by `config_name`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
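# Invoked through python-fire; a hypothetical call (file and model names are illustrative):
#   python save_randomly_initialized_model.py t5-small /tmp/tiny-t5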
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 87 |
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase__ = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT configuration with extra hyper-parameters for pruning/masking (pruning method, mask init and scale)."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
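# Minimal usage sketch (the values shown are the defaults above):
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)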
| 87 | 1 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(f'{target} was {not_str}found in {sequence}')
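# Quick sanity checks (binary search requires a sorted input):
#   binary_search([1, 3, 5, 7], 5)  -> True
#   binary_search([1, 3, 5, 7], 4)  -> False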
| 24 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase : List[Any] = 16
__lowerCAmelCase : Any = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 88 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters sorted, which is identical for all anagrams of the word."""
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that shares the given word's signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
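# e.g. signature("race") == signature("care") == "acer", so both words land in the same bucket.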
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
        file.write(pprint.pformat(all_anagrams))
| 267 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
UpperCAmelCase = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12

        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = 'cpu'

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'teddy bear playing in the pool'

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type='np', return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')

        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            'teddy bear playing in the pool', num_images_per_prompt=1, generator=generator, output_type='np',
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 267 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
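# Example of how the mapping applies: an original OpenAI key such as
#   "decoder.blocks.0.mlp.0.weight"  becomes  "decoder.layers.0.fc1.weight"
# once every matching substring above has been substituted in rename_keys below.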
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
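# The Linear layer built above shares the decoder token embedding matrix as its weight, i.e. the
# output projection is tied to the input embedding; this is why `tie_embeds` below skips loading a
# separate projection head from the checkpoint.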
def _download(url: str, root: str = ".") -> bytes:
    # Note: the default for `root` is an assumption added here so that the single-argument call in
    # convert_openai_whisper_to_tfms below works; the original signature required both arguments.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 116 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 18 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
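# A minimal usage sketch (added for illustration; not part of the accelerate
# module): it assumes the restored helper names above and exercises only the
# pure converters, which need no interactive input.
if __name__ == "__main__":
    assert _convert_yes_no_to_bool("YES") is True
    assert _convert_mixed_precision("1").value == "fp16"
    assert _convert_compute_environment("0").value == "LOCAL_MACHINE"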
| 29 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 173: count the square laminae that use no more than `limit` tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
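    # Hedged sanity check (added for illustration; not in the original): a tiny
    # brute force over (outer, hole) pairs cross-checks the closed-form loop.
    # The Project Euler 173 statement gives forty-one laminae for one hundred tiles.
    def _brute_force(small_limit: int) -> int:
        count = 0
        for outer in range(3, small_limit):
            for hole in range(outer - 2, 0, -2):
                if outer**2 - hole**2 > small_limit:
                    break
                count += 1
        return count

    assert solution(100) == _brute_force(100) == 41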
| 29 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California house price dataset is used to demonstrate the regressor.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 159 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    is_language_sensitive: bool
    features: List[SquadFeatures]
    mode: Split
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
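# A hedged usage sketch (added for illustration; not part of the module): it
# assumes a local SQuAD v1 `data_dir` containing train-v1.1.json/dev-v1.1.json;
# the path is a placeholder, not something this file provides.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
    dataset = SquadDataset(args, tokenizer, mode=Split.dev)
    batch = next(iter(DataLoader(dataset, batch_size=2)))
    print({k: v.shape for k, v in batch.items()})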
| 19 | 0 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
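    # Illustrative checks (added; not in the original): "applepenapple" segments
    # over ["apple", "pen"], while "catsandog" famously does not segment over
    # ["cats", "dog", "sand", "and", "cat"].
    assert word_break("applepenapple", ["apple", "pen"])
    assert not word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])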
| 371 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    KDPM2DiscreteScheduler is inspired by DPM-Solver-2 and Algorithm 2 of the k-diffusion sampler.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        """
        Scales the denoising model input by `1 / sqrt(sigma^2 + 1)` for the current timestep.
        """
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
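# A hedged usage sketch (added; not part of the original file): the standard
# diffusers sampling loop for this scheduler, with a zero-returning stand-in
# in place of a trained denoiser.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps=10, device="cpu")

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    unet = lambda x, t: torch.zeros_like(x)  # placeholder epsilon model

    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        sample = scheduler.step(unet(model_input, t), t, sample).prev_sample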
| 71 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# convert to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
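# Round-trip sanity sketch (added; not in the original): bits_to_decimal should
# invert decimal_to_bits up to the 8-bit quantization of the 0-1 pixel range.
if __name__ == "__main__":
    img = torch.rand(2, 3, 16, 16)
    recovered = bits_to_decimal(decimal_to_bits(img))
    assert torch.allclose(img, recovered, atol=1 / 255 + 1e-6)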
def lowerCAmelCase__( self : Union[str, Any] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : float = 0.0 , lowercase : bool = True , lowercase : Tuple=None , lowercase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__snake_case : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__snake_case : Any = self.alphas_cumprod[timestep]
__snake_case : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__snake_case : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__snake_case : Optional[int] = self.bit_scale
if self.config.clip_sample:
__snake_case : Union[str, Any] = torch.clamp(lowercase , -scale , lowercase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__snake_case : List[str] = self._get_variance(lowercase , lowercase )
__snake_case : Tuple = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__snake_case : Any = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : Dict = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__snake_case : List[Any] = model_output.device if torch.is_tensor(lowercase ) else "cpu"
__snake_case : List[str] = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowercase ).to(lowercase )
__snake_case : Tuple = self._get_variance(lowercase , lowercase ) ** 0.5 * eta * noise
__snake_case : str = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase )
def lowerCAmelCase__( self : List[str] , lowercase : torch.FloatTensor , lowercase : int , lowercase : torch.FloatTensor , lowercase : Optional[Any]="epsilon" , lowercase : Optional[int]=None , lowercase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__snake_case : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__snake_case , __snake_case : Any = torch.split(lowercase , sample.shape[1] , dim=1 )
else:
__snake_case : Any = None
# 1. compute alphas, betas
__snake_case : str = self.alphas_cumprod[t]
__snake_case : int = self.alphas_cumprod[t - 1] if t > 0 else self.one
__snake_case : str = 1 - alpha_prod_t
__snake_case : Tuple = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__snake_case : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__snake_case : List[Any] = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
__snake_case : int = self.bit_scale
if self.config.clip_sample:
__snake_case : Optional[int] = torch.clamp(lowercase , -scale , lowercase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Dict = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__snake_case : Union[str, Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case : int = 0
if t > 0:
__snake_case : Dict = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowercase ).to(model_output.device )
__snake_case : Optional[int] = (self._get_variance(lowercase , predicted_variance=lowercase ) ** 0.5) * noise
__snake_case : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase )
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 1.0 , ) -> List[str]:
'''simple docstring'''
super().__init__()
__snake_case : Optional[int] = bit_scale
__snake_case : Tuple = (
ddim_bit_scheduler_step if isinstance(UpperCAmelCase , UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = 256 , UpperCAmelCase = 256 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = "pil" , UpperCAmelCase = True , **UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
__snake_case : Optional[int] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase , )
__snake_case : List[Any] = decimal_to_bits(UpperCAmelCase ) * self.bit_scale
__snake_case : Dict = latents.to(self.device )
self.scheduler.set_timesteps(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__snake_case : Optional[Any] = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Any = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__snake_case : List[str] = bits_to_decimal(UpperCAmelCase )
if output_type == "pil":
__snake_case : Optional[Any] = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
| 326 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(lowercase ) as metadata_file:
__snake_case : int = json.load(lowercase )
__snake_case : Optional[int] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__snake_case : List[Any] = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
__snake_case : Tuple = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
__snake_case : Optional[int] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case : Optional[int] = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
__snake_case : Any = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
__snake_case : Tuple = json.load(lowercase )
__snake_case : List[Any] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
__snake_case : Any = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
__snake_case : str = tokenizer.convert_tokens_to_ids(["@"] )[0]
__snake_case : List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]
__snake_case : List[Any] = state_dict["embeddings.word_embeddings.weight"]
__snake_case : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : List[Any] = state_dict[bias_name]
__snake_case : Optional[int] = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : int = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Dict = f"""encoder.layer.{layer_index}.attention.self."""
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
__snake_case : str = state_dict[prefix + matrix_name]
__snake_case : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : List[Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__snake_case : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__snake_case : str = state_dict[key]
else:
__snake_case : str = state_dict[key]
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case : int = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
__snake_case : Tuple = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__snake_case : Union[str, Any] = (0, 9)
__snake_case : Optional[int] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Any = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : Optional[Any] = torch.Size((1, 33, 768) )
__snake_case : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : str = torch.Size((1, 1, 768) )
__snake_case : int = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case : str = MLukeTokenizer.from_pretrained(lowercase )
__snake_case : Dict = "Tokyo is the capital of <mask>."
__snake_case : Union[str, Any] = (24, 30)
__snake_case : int = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
__snake_case : int = model(**lowercase )
__snake_case : Dict = encoding["input_ids"][0].tolist()
__snake_case : Dict = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
__snake_case : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
__snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
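# Hedged usage note (added; not part of the script): a typical invocation looks
# like the following, where the script name and all paths are placeholders that
# must point at a downloaded mLUKE checkpoint and its metadata/entity files:
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted-mluke-base \
#       --model_size base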
| 326 | 1 |
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number using a one-liner.
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """
    Benchmark the three implementations on ints of increasing length.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 354 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """
    Circular FIFO queue backed by a doubly linked list of fixed capacity;
    the node slots are recycled instead of being reallocated.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
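    # Illustrative run (added; not in the original): FIFO order is preserved and
    # the three pre-allocated slots are recycled in a ring.
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    assert queue.dequeue() == "b"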
| 28 | 0 |
def solution(n: int = 600851475143) -> int:
    """
    Returns the largest prime factor of n (Project Euler 3).
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'{solution() = }')
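    # Illustrative check (added; not in the original): the Project Euler example
    # 13195 = 5 * 7 * 13 * 29, whose largest prime factor is 29.
    assert solution(13195) == 29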
| 184 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
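# Illustration (added; not part of the file): with this `_import_structure`,
# attributes of `transformers.models.resnet` resolve lazily, so the heavy
# torch-backed module only loads when one of its symbols is first touched:
#
#     from transformers import ResNetConfig   # cheap, configuration only
#     from transformers import ResNetModel    # triggers the modeling import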
| 270 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = ViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = ViTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Any = ViTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = self.type_sequence_label_size
_UpperCAmelCase : List[Any] = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[int] = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) : Dict = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ViTModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str = ViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
_UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = self.default_image_processor
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# verify the logits
_UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Any = ViTModel.from_pretrained("facebook/dino-vits8" ).to(UpperCamelCase__ )
_UpperCAmelCase : Any = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : List[str] = image_processor(images=UpperCamelCase__ , return_tensors="pt" )
_UpperCAmelCase : Optional[int] = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Dict = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ )
# verify the logits
_UpperCAmelCase : Tuple = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """
        A small test to make sure that inference works in half precision without any problem.
        """
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 363 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3.3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
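# A hedged usage sketch (added; not in the original): the descriptor computes
# the value once per instance and serves the memoized result afterwards.
if __name__ == "__main__":
    class Circle:
        def __init__(self, radius):
            self.radius = radius
            self.calls = 0

        @cached_property
        def area(self):
            self.calls += 1
            return 3.14159 * self.radius**2

    c = Circle(2.0)
    assert c.area == c.area  # second access hits the cache
    assert c.calls == 1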
def strtobool(val: str) -> int:
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a torch/tf/jax tensor or a numpy array.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
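# Illustrative example (assumes only numpy is installed); nesting is converted
# recursively and tuples become lists:
#
#     >>> to_py_obj({"logits": np.arange(3), "ids": (np.int64(7),)})
#     {'logits': [0, 1, 2], 'ids': [7]}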
def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with dict-like plus tuple-like access."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
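# Minimal usage sketch (hypothetical subclass, not defined in this module):
#
#     >>> @dataclass
#     ... class SampleOutput(ModelOutput):
#     ...     loss: Optional[float] = None
#     ...     logits: Optional[list] = None
#     >>> out = SampleOutput(logits=[1.0, 2.0])
#     >>> out["logits"] == out.logits == out[0]
#     True
#     >>> out.to_tuple()  # None-valued fields are dropped from the dict view
#     ([1.0, 2.0],)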
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
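# Illustrative use (the managers below are hypothetical; any context managers work):
#
#     with ContextManagers([torch.no_grad(), accelerator.autocast()]):
#         ...  # every manager in the list is entered, and exited in reverse order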
def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
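# Sketch of what the check looks for (hypothetical model class, illustrative only):
#
#     class ToyModel(PreTrainedModel):              # infer_framework -> "pt"
#         def forward(self, input_ids, return_loss=True): ...
#
#     can_return_loss(ToyModel)  # -> True, since `return_loss` defaults to True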
def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
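# Illustrative example: nested keys are joined with the delimiter.
#
#     >>> flatten_dict({"model": {"lr": 1e-4, "optim": {"name": "adamw"}}})
#     {'model.lr': 0.0001, 'model.optim.name': 'adamw'}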
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
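# The framework-agnostic array helpers in this section all dispatch on the
# input type, e.g. (illustrative, with a numpy input):
#
#     >>> transpose(np.ones((2, 3))).shape
#     (3, 2)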
def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for numpy/torch/TF/jax arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
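# Illustrative example: the total number of elements, regardless of framework.
#
#     >>> tensor_size(np.zeros((2, 3)))
#     6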
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model class by inspecting its base classes, without using `isinstance`,
    because we cannot guarantee that the relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
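# Illustrative example: any torch.nn.Module subclass resolves to "pt" because
# `nn.Module` lives in a module whose name starts with "torch":
#
#     >>> import torch.nn as nn
#     >>> infer_framework(nn.Linear)
#     'pt'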
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block bundling conv, batch-norm and ReLU layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM) applied on the last feature map."""

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """Unified Perceptual Parsing decode head, based on an FPN plus a Pyramid Pooling Module."""

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head used by UperNet for semantic segmentation."""

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'stabilityai/stable-diffusion-2'
_UpperCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='scheduler' )
_UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision='bf16' , dtype=jnp.bfloataa , )
_UpperCAmelCase = scheduler_params
_UpperCAmelCase = 'A painting of a squirrel eating a burger'
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
_UpperCAmelCase = sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_UpperCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCAmelCase = images[0, 253:256, 253:256, -1]
_UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCAmelCase = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
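
# Sketch of the replicate/shard/pmap data-parallel pattern the tests above rely on
# (toy shapes chosen for illustration; any pytree of arrays works the same way).
if is_flax_available():
    _params = {"w": jnp.ones((4, 2))}
    _batch = jnp.ones((jax.device_count() * 2, 4))  # the batch must split evenly across devices

    _p_params = replicate(_params)  # copy every leaf onto each device (adds a leading device axis)
    _p_batch = shard(_batch)        # reshape to (n_devices, per_device_batch, features)

    _apply = jax.pmap(lambda params, x: x @ params["w"])
    _out = _apply(_p_params, _p_batch)  # shape: (n_devices, 2, 2)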
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
        benchmark = TensorFlowBenchmark(args=benchmark_args)
    except ValueError as e:
        # Translate deprecated `--no_xxx` flags into an actionable error message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
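    # Programmatic alternative (a sketch, kept as a comment so the script's behaviour
    # is unchanged): the same benchmark can be driven without CLI flags; `models`,
    # `batch_sizes` and `sequence_lengths` are standard TensorFlowBenchmarkArguments fields.
    #
    #   benchmark_args = TensorFlowBenchmarkArguments(
    #       models=["bert-base-uncased"], batch_sizes=[1, 8], sequence_lengths=[8, 128]
    #   )
    #   TensorFlowBenchmark(args=benchmark_args).run()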
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base,
        # so very large powers can be compared without ever computing them.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two base/power pairs from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
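
    # Worked example (a sketch): res(2, 1000) = 1000 * log10(2) ~= 301.03, while
    # res(10, 300) = 300, so 2**1000 > 10**300 without materialising either power.
    assert res(2, 1000) > res(10, 300)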
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
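
    # Illustration (a sketch): requests URL-encodes `params` into the query string;
    # the prepared URL below is exactly what get_citation() fetches. prepare() does
    # not hit the network.
    prepared = requests.Request("GET", "https://scholar.google.com/scholar_lookup", params=params).prepare()
    print(prepared.url)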
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
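
# Minimal sketch of the lazy-import idea behind `_LazyModule` above (illustrative
# only -- not the real implementation): exported names are resolved to their
# submodule on first attribute access instead of at package import time.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(module, attr)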
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
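
# Usage sketch for the decorator factory above (illustrative only; the benchmark
# builds these wrappers inside the _prepare_*_func methods below). With eager mode
# off and XLA on, the wrapped callable is compiled via
# tf.function(experimental_compile=True) on its first call.
if is_tf_available():

    @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
    def _matmul_step():
        a = tf.random.uniform((128, 128))
        return tf.matmul(a, a)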
def random_input_ids(batch_size, sequence_length, vocab_size) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dictionary into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
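
    # Hypothetical follow-up (a sketch, not part of the original script): score a
    # fitted classifier on a held-out split instead of only plotting the matrix.
    from sklearn.metrics import accuracy_score

    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    classifier = xgboost(x_train, y_train)
    print(f"test accuracy: {accuracy_score(y_test, classifier.predict(x_test)):.3f}")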
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # The table has to be a square array, so we check that first.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
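
    # Quick numerical check (a sketch): for a small example, L @ U reproduces the
    # input and L is unit lower triangular.
    example = np.array([[4.0, 3.0], [6.0, 3.0]])
    lower, upper = lower_upper_decomposition(example)
    assert np.allclose(lower @ upper, example)
    assert np.allclose(np.diag(lower), 1.0)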
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
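
# Usage sketch: a down-sized configuration for quick experiments. The keyword
# names mirror the __init__ arguments above; unspecified fields keep their defaults.
_tiny_config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)
print(_tiny_config.hidden_size, _tiny_config.num_hidden_layers)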
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")

    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]

    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
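
# Quick illustration (a sketch): with pad_token_id=0, the two all-pad columns on
# the right are dropped, while the mid-row 0 after the 7 survives because
# trimming is column-wise.
_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
assert trim_batch(_ids, pad_token_id=0).shape == (2, 2)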
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
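
# Worked example (a sketch): articles are stripped by normalize_answer, so the
# token overlap is {"cat", "sat"}; precision = 2/2, recall = 2/3, and
# F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.
assert abs(f1_score("The cat sat", "a cat sat down") - 0.8) < 1e-9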
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Strip leading whitespace from the questions so truncation of the context behaves well.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
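
# Aside (a sketch, kept as a comment so the script flow is unchanged): what the
# stride/overflow machinery above produces. A single long (question, context) pair
# fans out into several features, and `overflow_to_sample_mapping` points every
# feature back at example 0:
#
#   enc = tokenizer(
#       ["What is TensorRT?"],
#       ["TensorRT is an SDK for high-performance deep learning inference. " * 40],
#       truncation="only_second", max_length=64, stride=16,
#       return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length",
#   )
#   print(len(enc["input_ids"]), enc["overflow_to_sample_mapping"])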
eval_examples = raw_datasets["validation"]

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate pinned host buffers for the two output bindings (start/end logits) plus matching device buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
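    # Hypothetical sketch of the `model_infer` helper used below; it is defined
    # elsewhere in the original script, so the exact body here is an assumption.
    # The idea: copy the input bindings to the GPU, run the engine asynchronously,
    # copy the two logits buffers back, and time the whole round trip on the stream.
    #
    #   def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    #       input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    #       attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    #       token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    #       cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    #       cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    #       cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    #       start = timeit.default_timer()
    #       context.execute_async(
    #           bindings=[int(d) for d in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    #       )
    #       cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    #       cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    #       stream.synchronize()
    #       return (h_output0, h_output1), timeit.default_timer() - start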
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
    logger.info('Total Number of Inferences = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 25 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope='session' )
def zstd_path(tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT, 'utf-8' )
    with zstd.open(path, 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format', ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted', [True, False] )
@pytest.mark.parametrize('default_cache_dir', [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch ):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR', custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    output_path = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_http_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_ftp_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE', True )
def test_fsspec_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co', temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 141 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
snake_case__ = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
snake_case__ = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
snake_case__ = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference texts, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\'. Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric ):
    def _info(self ):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 357 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Any = tempfile.mkdtemp()
A_ : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Tuple = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Dict , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : int ):
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = self.get_tokenizer()
A_ : Tuple = self.get_rust_tokenizer()
A_ : Dict = self.get_image_processor()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def _a ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
A_ : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : Any = self.prepare_image_inputs()
A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' )
A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : List[str] = self.get_tokenizer()
A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : int = '''lower newer'''
A_ : str = processor(text=_lowerCamelCase )
A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : str ):
"""simple docstring"""
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : List[Any] = '''lower newer'''
A_ : Optional[int] = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self : Tuple ):
"""simple docstring"""
A_ : str = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
A_ : str = '''lower newer'''
A_ : List[str] = self.prepare_image_inputs()
A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase_ = 1_6
UpperCamelCase_ = 3_2
def lowercase__( __UpperCamelCase: Accelerator ,__UpperCamelCase: int = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE : str = load_dataset('glue' ,'mrpc' )
def tokenize_function(__UpperCamelCase: Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[str] = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : List[str] = tokenized_datasets.rename_column('label' ,'labels' )
def collate_fn(__UpperCamelCase: str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Dict = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : Optional[int] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Dict = 8
else:
SCREAMING_SNAKE_CASE : str = None
return tokenizer.pad(
__UpperCamelCase ,padding='longest' ,max_length=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_tensors='pt' ,)
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : int = DataLoader(
tokenized_datasets['train'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets['validation'] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
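# Illustrative note (comment only): what the `pad_to_multiple_of` logic above
# does to a batch. `tokenizer.pad` first pads to the longest sequence in the
# batch, then rounds that length up to the requested multiple, which keeps
# tensor-core kernels efficient under mixed precision. Lengths are hypothetical.
#
#   batch lengths [37, 51] -> padded length 51 with pad_to_multiple_of=None
#   batch lengths [37, 51] -> padded length 56 with pad_to_multiple_of=8
#   batch lengths [37, 51] -> padded length 64 with pad_to_multiple_of=16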
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase_ = mocked_dataloaders # noqa: F811
def lowercase__( __UpperCamelCase: Tuple ,__UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' ,__UpperCamelCase ) == "1":
SCREAMING_SNAKE_CASE : Any = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE : str = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Tuple = config['lr']
SCREAMING_SNAKE_CASE : Tuple = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config['seed'] )
SCREAMING_SNAKE_CASE : Tuple = int(config['batch_size'] )
SCREAMING_SNAKE_CASE : int = evaluate.load('glue' ,'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__UpperCamelCase )
def inner_training_loop(__UpperCamelCase: Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' ,return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : str = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : str = AdamW(params=model.parameters() ,lr=__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloaders(__UpperCamelCase ,__UpperCamelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=1_00 ,num_training_steps=(len(__UpperCamelCase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE : str = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__UpperCamelCase ,references=__UpperCamelCase ,)
SCREAMING_SNAKE_CASE : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,__UpperCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' ,type=__UpperCamelCase ,default=__UpperCamelCase ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' ,)
parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
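# Illustrative sketch (comment only): how `find_executable_batch_size` behaves.
# The wrapped function receives the batch size as its first argument and is
# retried with the size halved whenever it raises a CUDA out-of-memory error.
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders/model with `batch_size`; an OOM retries at 64, 32, ...
#
#   train()  # called with no arguments; the decorator supplies `batch_size`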
| 251 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["MaskFormerFeatureExtractor"]
UpperCamelCase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
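# Illustrative note (comment only): with `_LazyModule` installed in sys.modules,
# each submodule is imported only on first attribute access, e.g.
#
#   import transformers.models.maskformer as maskformer
#   cfg = maskformer.MaskFormerConfig()  # triggers the import of configuration_maskformer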
| 251 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=__a, padding_value=1.0)
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=__a, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Optional[Any] = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "lower newer"
_lowerCAmelCase : List[str] = processor(text=__a)
_lowerCAmelCase : List[Any] = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Any = processor(images=__a, visual_prompt=__a)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = CLIPSegProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[str] = processor.batch_decode(__a)
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
| 358 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    verbose_logging: Optional[bool] = field(
        default=False , metadata={'help': 'Whether to log verbose messages or not.'} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger(model_args , training_args ):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_split_name: Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    validation_split_name: Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features: List[Dict[str, Any]] ):
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output length indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure the gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
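    # Worked example (comment only) of the gumbel temperature schedule above,
    # using the ModelArguments defaults (max 2.0, min 0.5, decay 0.999995):
    #
    #   step      0: max(2.0 * 0.999995**0,      0.5) = 2.0
    #   step 100000: max(2.0 * 0.999995**100000, 0.5) ≈ 1.21
    #   step 500000: max(2.0 * 0.999995**500000, 0.5) = 0.5  (clamped at the minimum)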
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'" )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
main()
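# Illustrative sketch (comment only): what `_compute_mask_indices` produces for
# the collator above — a boolean (batch, sequence) mask marking the timesteps
# whose quantized targets the model must predict. Shape and probabilities here
# are hypothetical, and the exact return type varies across transformers versions.
#
#   mask = _compute_mask_indices((2, 50), 0.65, 10, min_masks=2)
#   print(mask.shape)         # (2, 50)
#   print(mask.sum(axis=-1))  # roughly mask_prob * 50 masked timesteps per row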
| 300 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def _A (__a , __a = 16 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE_ : str = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : Dict = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : Tuple = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : Dict = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : Tuple = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE_ : Any = None
return tokenizer.pad(
__a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ : Union[str, Any] = mocked_dataloaders # noqa: F811
def _A (__a , __a ) -> Union[str, Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __a ) == "1":
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : str = config['''lr''']
SCREAMING_SNAKE_CASE_ : Any = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE_ : str = int(config['''seed'''] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE_ : List[str] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_ : Any = MAX_GPU_BATCH_SIZE
set_seed(__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = get_dataloaders(__a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : Tuple = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : Dict = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_00 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_ : List[str] = model(**__a )
SCREAMING_SNAKE_CASE_ : str = outputs.loss
SCREAMING_SNAKE_CASE_ : int = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
SCREAMING_SNAKE_CASE_ : Any = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__a )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__a ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
SCREAMING_SNAKE_CASE_ : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__a , references=__a , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __a )
def _A () -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
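# --- Editor's illustrative example (not from the original source) ---
# A minimal sketch of the evaluation loop rewritten around `Accelerator.gather_for_metrics`,
# the simpler alternative mentioned in the comments above: it drops the duplicated samples
# of the last distributed batch automatically, so no manual `samples_seen` bookkeeping is
# needed. Names are illustrative; `metric` is assumed to expose the `evaluate` library's
# `add_batch`/`compute` API.
import torch

def evaluate_with_gather_for_metrics(model, eval_dataloader, accelerator, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics truncates the padding samples added by the distributed sampler.
        predictions, references = accelerator.gather_for_metrics(
            (predictions, batch["labels"])
        )
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()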
| 91 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/xglm-564M""": 2048,
}
class XGLMTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , vocab_file : str , bos_token : Tuple="<s>" , eos_token : Any="</s>" , sep_token : Optional[int]="</s>" , cls_token : List[Any]="<s>" , unk_token : Union[str, Any]="<unk>" , pad_token : Union[str, Any]="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Tuple , ):
'''simple docstring'''
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
self.num_madeup_words = 7
madeup_words = [F'<madeupword{i}>' for i in range(self.num_madeup_words)]
kwargs['''additional_special_tokens'''] = kwargs.get('''additional_special_tokens''' , [])
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
# Mimic fairseq token-to-id alignment for the first 4 tokens
self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
sp_size = len(self.sp_model)
madeup_words_to_ids = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
self.fairseq_tokens_to_ids.update(madeup_words_to_ids)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict):
'''simple docstring'''
state = self.__dict__.copy()
state['''sp_model'''] = None
state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , d : str):
'''simple docstring'''
self.__dict__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_b is None:
return [self.sep_token_id] + token_ids_a
sep = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_b
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=already_has_special_tokens)
if token_ids_b is None:
return [1] + ([0] * len(token_ids_a))
return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None):
'''simple docstring'''
sep = [self.sep_token_id]
if token_ids_b is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_b) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str):
'''simple docstring'''
return self.sp_model.encode(lowercase_ , out_type=str)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , token : Union[str, Any]):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : int , index : Optional[Any]):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple):
'''simple docstring'''
out_string = ''''''.join(lowercase_).replace(SPIECE_UNDERLINE , ''' ''').strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : str , save_directory : str , filename_prefix : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(save_directory):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file , '''wb''') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
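# --- Editor's illustrative example (not from the original source) ---
# A standalone sketch of the fairseq/SentencePiece id alignment implemented above:
# ids 0-3 are reserved for <s>/<pad>/</s>/<unk> and every SentencePiece piece id is
# shifted by a fixed offset of 1, with spm id 0 (unknown) mapped back to <unk>.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_vocab_id(spm_id: int) -> int:
    # SentencePiece returns 0 for unknown pieces, which must map to <unk> (id 3).
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_vocab_id(0) == 3  # unknown piece
assert spm_id_to_vocab_id(5) == 6  # regular piece, shifted by the offset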
| 91 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_glpn'''] = ['''GLPNFeatureExtractor''']
_import_structure['''image_processing_glpn'''] = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_glpn'''] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368 |
'''simple docstring'''
from __future__ import annotations
class a__ :
'''simple docstring'''
def __init__( self , order ) -> None:
self.order = order
# a_{0} ... a_{k}
self.a_coeffs = [1.0] + [0.0] * order
# b_{0} ... b_{k}
self.b_coeffs = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
self.input_history = [0.0] * self.order
# y[n-1] ... y[n-k]
self.output_history = [0.0] * self.order
def set_coefficients ( self , a_coeffs , b_coeffs ) -> None:
if len(a_coeffs ) < self.order:
a_coeffs = [1.0, *a_coeffs]
if len(a_coeffs ) != self.order + 1:
error_msg = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(a_coeffs )}"""
)
raise ValueError(error_msg )
if len(b_coeffs ) != self.order + 1:
error_msg = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(b_coeffs )}"""
)
raise ValueError(error_msg )
self.a_coeffs = a_coeffs
self.b_coeffs = b_coeffs
def process ( self , sample ) -> float:
result = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
self.input_history[1:] = self.input_history[:-1]
self.output_history[1:] = self.output_history[:-1]
self.input_history[0] = sample
self.output_history[0] = result
return result
| 228 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (although we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (although we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used ( config_class , attributes , default_value , source_strings ):
"""simple docstring"""
attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
attribute_used = True
# Deal with multi-line cases
elif (
re.search(
rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , modeling_source , )
is not None
):
attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
attributes_to_allow = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
case_allowed = True
if not attribute_used:
case_allowed = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
case_allowed = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
case_allowed = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
case_allowed = True
elif attribute.endswith("_token_id" ):
case_allowed = True
# configuration class specific cases
if not case_allowed:
allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
case_allowed = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used ( config_class ):
"""simple docstring"""
signature = dict(inspect.signature(config_class.__init__ ).parameters )
parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
parameter_defaults = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
reversed_attribute_map = {}
if len(config_class.attribute_map ) > 0:
reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
config_source_file = inspect.getsourcefile(config_class )
model_dir = os.path.dirname(config_source_file )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
# Get the source code strings
modeling_sources = []
for path in modeling_paths:
if os.path.isfile(path ):
with open(path ) as fp:
modeling_sources.append(fp.read() )
unused_attributes = []
for config_param, default_value in zip(parameter_names , parameter_defaults ):
# `attributes` here is all the variant names for `config_param`
attributes = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
unused_attributes.append(attributes[0] )
return sorted(unused_attributes )
def check_config_attributes ( ):
"""simple docstring"""
configs_with_unused_attributes = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
config_classes_in_module = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
and issubclass(x , PretrainedConfig )
and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
unused_attributes = check_config_attributes_being_used(config_class )
if len(unused_attributes ) > 0:
configs_with_unused_attributes[config_class.__name__] = unused_attributes
if len(configs_with_unused_attributes ) > 0:
error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
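# --- Editor's illustrative example (not from the original source) ---
# A self-contained check of the core rule above: an attribute counts as "used" when the
# modeling source contains `config.attr` or a `getattr(config, "attr", ...)` access,
# including the multi-line form matched with a whitespace-tolerant regex.
import re

def attribute_appears_in_source(attribute: str, source: str) -> bool:
    if f"config.{attribute}" in source or f'getattr(config, "{attribute}"' in source:
        return True
    pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'
    return re.search(pattern, source) is not None

assert attribute_appears_in_source("hidden_size", "x = config.hidden_size")
assert attribute_appears_in_source("dropout", 'getattr(\n    self.config, "dropout", 0.1)')
assert not attribute_appears_in_source("unused", "y = config.hidden_size")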
| 184 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 excluding "|i1", whose values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image :
"""simple docstring"""
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image" , init=False , repr=False)
def __call__( self : Any ):
'''simple docstring'''
return self.pa_type
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : str = np.array(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"path": value, "bytes": None}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"path": None, "bytes": value}
elif isinstance(__lowerCamelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__lowerCamelCase )
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__lowerCamelCase )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowerCAmelCase ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : List[Any]=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = PIL.Image.open(__lowerCamelCase )
else:
lowerCamelCase__ : Tuple = path.split("::" )[-1]
try:
lowerCamelCase__ : str = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"]
lowerCamelCase__ : Any = token_per_repo_id.get(__lowerCamelCase )
except ValueError:
lowerCamelCase__ : int = None
with xopen(__lowerCamelCase , "rb" , use_auth_token=__lowerCamelCase ) as f:
lowerCamelCase__ : List[str] = BytesIO(f.read() )
lowerCamelCase__ : Optional[int] = PIL.Image.open(bytes_ )
else:
lowerCamelCase__ : Dict = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
lowerCamelCase__ : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
lowerCamelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase__ : List[Any] = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
lowerCamelCase__ : Any = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
lowerCamelCase__ : Dict = storage.field("bytes" )
else:
lowerCamelCase__ : Optional[int] = pa.array([None] * len(__lowerCamelCase ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
lowerCamelCase__ : Dict = storage.field("path" )
else:
lowerCamelCase__ : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
lowerCamelCase__ : int = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCamelCase__ : Union[str, Any] = pa.array(
[encode_np_array(np.array(__lowerCamelCase ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCamelCase__ : Dict = pa.array([None] * len(__lowerCamelCase ) , type=pa.string() )
lowerCamelCase__ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
def lowerCAmelCase ( self : int , __lowerCamelCase : pa.StructArray ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__lowerCamelCase : Union[str, Any] ):
with xopen(__lowerCamelCase , "rb" ) as f:
lowerCamelCase__ : str = f.read()
return bytes_
lowerCamelCase__ : List[Any] = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase__ : Optional[int] = pa.array(
[os.path.basename(__lowerCamelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
lowerCamelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__lowerCamelCase , self.pa_type )
def list_image_compression_formats ( ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCamelCase__ : List[str] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes ( image : "PIL.Image.Image" ):
"""simple docstring"""
buffer = BytesIO()
if image.format in list_image_compression_formats():
image_format = image.format
else:
image_format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(buffer , format=image_format )
return buffer.getvalue()
def encode_pil_image ( image : "PIL.Image.Image" ):
"""simple docstring"""
if hasattr(image , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array ( array : np.ndarray ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
dest_dtype = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
dest_dtype = np.dtype(dtype_str )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
image = PIL.Image.fromarray(array.astype(dest_dtype ) )
return {"path": None, "bytes": image_to_bytes(image )}
def lowercase_ ( _A : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
lowerCamelCase__ , lowerCamelCase__ : int = first_non_null_value(_A )
if isinstance(_A , _A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_A , np.ndarray ):
lowerCamelCase__ : Optional[Any] = no_op_if_value_is_null(_A )
return [obj_to_image_dict_func(_A ) for obj in objs]
elif isinstance(_A , PIL.Image.Image ):
lowerCamelCase__ : int = no_op_if_value_is_null(_A )
return [obj_to_image_dict_func(_A ) for obj in objs]
else:
return objs
else:
return objs
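# --- Editor's illustrative example (not from the original source) ---
# A minimal sketch of the encoding contract above: every input form is normalised to a
# {"bytes", "path"} dict, and arrays (which carry no filesystem path) are serialised to
# PNG bytes. Requires Pillow and numpy.
from io import BytesIO

import numpy as np
import PIL.Image

def encode_tiny_array() -> dict:
    array = np.zeros((4, 4, 3), dtype=np.uint8)  # a tiny black RGB image
    buffer = BytesIO()
    PIL.Image.fromarray(array).save(buffer, format="PNG")
    return {"path": None, "bytes": buffer.getvalue()}

encoded = encode_tiny_array()
assert encoded["path"] is None and encoded["bytes"].startswith(b"\x89PNG")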
| 184 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image ( image_size: str , device: List[Any] ) -> Optional[int]:
'''simple docstring'''
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('RGB' )
transform = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
image = transform(raw_image ).unsqueeze(0 ).to(device )
return image
def rename_key ( key: Optional[int] ) -> str:
'''simple docstring'''
if "visual_encoder" in key:
key = re.sub('visual_encoder*' , 'vision_model.encoder' , key )
if "blocks" in key:
key = re.sub(R'blocks' , 'layers' , key )
if "attn" in key:
key = re.sub(R'attn' , 'self_attn' , key )
if "norm1" in key:
key = re.sub(R'norm1' , 'layer_norm1' , key )
if "norm2" in key:
key = re.sub(R'norm2' , 'layer_norm2' , key )
if "encoder.norm" in key:
key = re.sub(R'encoder.norm' , 'post_layernorm' , key )
if "encoder.patch_embed.proj" in key:
key = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , key )
if "encoder.pos_embed" in key:
key = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , key )
if "encoder.cls_token" in key:
key = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , key )
if "self_attn" in key:
key = re.sub(R'self_attn.proj' , 'self_attn.projection' , key )
return key
@torch.no_grad()
def convert_blip_checkpoint ( pytorch_dump_folder_path: Optional[Any] , config_path: List[str]=None ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(a__ )
else:
_UpperCAmelCase = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(a__ ).eval()
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_UpperCAmelCase = blip_decoder(pretrained=a__ , image_size=3_8_4 , vit='base' )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
hf_model.load_state_dict(a__ )
_UpperCAmelCase = 3_8_4
_UpperCAmelCase = load_demo_image(image_size=a__ , device='cpu' )
_UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_UpperCAmelCase = tokenizer(['a picture of'] ).input_ids
_UpperCAmelCase = hf_model.generate(a__ , a__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
_UpperCAmelCase = hf_model.generate(a__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(a__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_UpperCAmelCase = blip_vqa(pretrained=a__ , image_size=a__ , vit='base' )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(a__ )
hf_vqa_model.load_state_dict(a__ )
_UpperCAmelCase = ['How many dogs are in this image?']
_UpperCAmelCase = tokenizer(a__ , return_tensors='pt' ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(a__ , a__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_UpperCAmelCase = blip_itm(pretrained=a__ , image_size=a__ , vit='base' )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(a__ )
_UpperCAmelCase = ['A picture of a woman with a dog sitting in a beach']
_UpperCAmelCase = tokenizer(
a__ , return_tensors='pt' , padding='max_length' , truncation=a__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(a__ )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ )
_UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase__ :int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase__ :List[Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
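# --- Editor's illustrative example (not from the original source) ---
# A standalone check of the state-dict key renaming performed by `rename_key` above,
# restated with only the first three visual-encoder rules for brevity.
import re

def rename_visual_key(key: str) -> str:
    key = re.sub("visual_encoder*", "vision_model.encoder", key)
    key = re.sub(r"blocks", "layers", key)
    key = re.sub(r"attn", "self_attn", key)
    return key

assert (
    rename_visual_key("visual_encoder.blocks.0.attn.qkv.weight")
    == "vision_model.encoder.layers.0.self_attn.qkv.weight"
)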
| 185 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a ( BaseOutput ):
_a : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
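# --- Editor's illustrative example (not from the original source) ---
# The general shape of the optional-dependency guard above, reduced to a self-contained
# helper: probe whether a backend is installed and fall back to a placeholder otherwise.
import importlib
import importlib.util

def optional_import(module_name: str):
    """Return the imported module if available, else None (the dummy-object path)."""
    if importlib.util.find_spec(module_name) is None:
        return None
    return importlib.import_module(module_name)

numpy_or_none = optional_import("numpy")  # None only when numpy is not installed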
| 185 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
A_ : Optional[int] = 300 # TEMPERATURE (unit = K)
def builtin_voltage ( donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
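# --- Editor's illustrative example (not from the original source) ---
# A worked check of the formula above, V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2),
# with typical silicon dopings in cm^-3 (the numbers are illustrative, not from the
# original file).
from math import log

from scipy.constants import Boltzmann, physical_constants

k_T_over_q = Boltzmann * 300 / physical_constants["electron volt"][0]  # ~0.02585 V
v_bi = k_T_over_q * log((1e17 * 1e17) / (1e10) ** 2)
assert abs(v_bi - 0.833) < 0.01  # roughly 0.83 V for these dopings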
| 165 |
"""simple docstring"""
def sylvester ( number ):
'''simple docstring'''
assert isinstance(number , int ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
msg = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(msg )
else:
num = sylvester(number - 1 )
lower = num - 1
upper = num
return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
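# --- Editor's illustrative example (not from the original source) ---
# The same recurrence, a(n) = a(n-1)**2 - a(n-1) + 1 with a(1) = 2, written iteratively
# to avoid deep recursion for large n.
def sylvester_iterative(number: int) -> int:
    value = 2
    for _ in range(number - 1):
        value = value * value - value + 1
    return value

assert sylvester_iterative(4) == 43
assert sylvester_iterative(5) == 1807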
| 165 | 1 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_snake_case = re.compile(r'\b(a|an|the)\b', re.UNICODE)
_snake_case = None
def parse_args ( ):
'''simple docstring'''
_a : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=UpperCamelCase__ , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=UpperCamelCase__ , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans ( dataset ):
'''simple docstring'''
qid_to_has_ans = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid_to_has_ans[qa["""id"""]] = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def normalize_answer ( s ):
'''simple docstring'''
def remove_articles(text ):
return ARTICLES_REGEX.sub(""" """ , text )
def white_space_fix(text ):
return " ".join(text.split() )
def remove_punc(text ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens ( s ):
'''simple docstring'''
if not s:
return []
return normalize_answer(s ).split()
def compute_exact ( a_gold , a_pred ):
'''simple docstring'''
return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa ( a_gold , a_pred ):
'''simple docstring'''
gold_toks = get_tokens(a_gold )
pred_toks = get_tokens(a_pred )
common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
num_same = sum(common.values() )
if len(gold_toks ) == 0 or len(pred_toks ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks )
recall = 1.0 * num_same / len(gold_toks )
fa = (2 * precision * recall) / (precision + recall)
return fa
def get_raw_scores ( dataset , preds ):
'''simple docstring'''
exact_scores = {}
fa_scores = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
qid = qa["""id"""]
gold_answers = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t )]
if not gold_answers:
# For unanswerable questions, the only correct answer is the empty string
gold_answers = [""""""]
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
return exact_scores, fa_scores
def apply_no_ans_threshold ( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
'''simple docstring'''
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid] )
else:
new_scores[qid] = s
return new_scores
def make_eval_dict ( exact_scores , fa_scores , qid_list=None ):
'''simple docstring'''
if not qid_list:
total = len(exact_scores )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
total = len(qid_list )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def merge_eval ( main_eval , new_eval , prefix ):
'''simple docstring'''
for k in new_eval:
main_eval[F"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve ( precisions , recalls , out_image , title ):
'''simple docstring'''
plt.step(recalls , precisions , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(recalls , precisions , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(title )
plt.savefig(out_image )
plt.clf()
def make_precision_recall_eval ( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
'''simple docstring'''
qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i + 1 )
cur_r = true_pos / float(num_true_pos )
if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p )
recalls.append(cur_r )
if out_image:
plot_pr_curve(precisions , recalls , out_image , title )
return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis ( main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
'''simple docstring'''
if out_image_dir and not os.path.exists(out_image_dir ):
os.makedirs(out_image_dir )
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
pr_fa = make_precision_recall_eval(
fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(main_eval , pr_exact , """pr_exact""" )
merge_eval(main_eval , pr_fa , """pr_f1""" )
merge_eval(main_eval , pr_oracle , """pr_oracle""" )
def histogram_na_prob ( na_probs , qid_list , image_dir , name ):
'''simple docstring'''
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x ) / float(len(x ) )
plt.hist(x , weights=weights , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(image_dir , F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def find_best_thresh ( preds , scores , na_probs , qid_to_has_ans ):
'''simple docstring'''
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
for i, qid in enumerate(qid_list ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh ( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
'''simple docstring'''
best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
main_eval["""best_exact"""] = best_exact
main_eval["""best_exact_thresh"""] = exact_thresh
main_eval["""best_f1"""] = best_fa
main_eval["""best_f1_thresh"""] = fa_thresh
def main ( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
_a : str = json.load(UpperCamelCase__ )
_a : Tuple = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
_a : Union[str, Any] = json.load(UpperCamelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_a : Optional[int] = json.load(UpperCamelCase__ )
else:
_a : int = {k: 0.0 for k in preds}
_a : str = make_qid_to_has_ans(UpperCamelCase__ ) # maps qid to True/False
_a : Dict = [k for k, v in qid_to_has_ans.items() if v]
_a : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
_a , _a : int = get_raw_scores(UpperCamelCase__ , UpperCamelCase__ )
_a : List[str] = apply_no_ans_threshold(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , OPTS.na_prob_thresh )
_a : List[str] = apply_no_ans_threshold(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , OPTS.na_prob_thresh )
_a : str = make_eval_dict(UpperCamelCase__ , UpperCamelCase__ )
if has_ans_qids:
_a : Dict = make_eval_dict(UpperCamelCase__ , UpperCamelCase__ , qid_list=UpperCamelCase__ )
merge_eval(UpperCamelCase__ , UpperCamelCase__ , """HasAns""" )
if no_ans_qids:
_a : Any = make_eval_dict(UpperCamelCase__ , UpperCamelCase__ , qid_list=UpperCamelCase__ )
merge_eval(UpperCamelCase__ , UpperCamelCase__ , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , OPTS.out_image_dir )
histogram_na_prob(UpperCamelCase__ , UpperCamelCase__ , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(UpperCamelCase__ , UpperCamelCase__ , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
else:
print(json.dumps(UpperCamelCase__ , indent=2 ) )
if __name__ == "__main__":
_snake_case = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
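# --- Editor's illustrative example (not from the original source) ---
# A worked instance of the token-overlap F1 computed by `compute_fa` above: precision
# and recall are taken over the bag (multiset) of normalised tokens.
import collections

gold = "the quick brown fox".split()
pred = "quick fox jumps".split()
num_same = sum((collections.Counter(gold) & collections.Counter(pred)).values())  # 2
precision = num_same / len(pred)  # 2/3
recall = num_same / len(gold)  # 2/4
f1 = 2 * precision * recall / (precision + recall)
assert abs(f1 - 4 / 7) < 1e-9  # ~0.571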
| 324 |
"""simple docstring"""
import numpy as np
def sigmoid ( vector ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit ( vector ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
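# --- Editor's illustrative example (not from the original source) ---
# Quick numeric check of the x * sigmoid(1.702 * x) approximation above: the output is
# exactly 0 at x = 0 and approaches the identity for large positive inputs.
import numpy as np

x = np.array([-2.0, 0.0, 2.0])
out = x / (1 + np.exp(-1.702 * x))
assert out[1] == 0.0
assert abs(out[2] - 1.936) < 0.001  # 2 * sigmoid(3.404) ~ 1.936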
| 324 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch ( tf_checkpoint_path : Tuple , xlnet_config_file : Optional[int] , pytorch_dump_folder_path : Any , finetuning_task : Any=None ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = XLNetConfig.from_json_file(__UpperCamelCase )
lowerCAmelCase : Optional[Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowerCAmelCase : Optional[Any] = finetuning_task
lowerCAmelCase : List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowerCAmelCase : List[Any] = XLNetForSequenceClassification(__UpperCamelCase )
elif "squad" in finetuning_task:
lowerCAmelCase : List[str] = finetuning_task
lowerCAmelCase : Optional[int] = XLNetForQuestionAnswering(__UpperCamelCase )
else:
lowerCAmelCase : str = XLNetLMHeadModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
lowerCAmelCase : List[str] = os.path.join(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase : Any = os.path.join(__UpperCamelCase , __UpperCamelCase )
print(f"""Save PyTorch model to {os.path.abspath(__UpperCamelCase )}""" )
torch.save(model.state_dict() , __UpperCamelCase )
print(f"""Save configuration file to {os.path.abspath(__UpperCamelCase )}""" )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCAmelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
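# --- Editor's illustrative example (not from the original source) ---
# A small sketch of how the GLUE task name selects the classification head size in the
# conversion above: the task string is lower-cased and looked up in the table.
GLUE_TASKS_NUM_LABELS = {"cola": 2, "mnli": 3, "sts-b": 1}  # abbreviated copy

num_labels = GLUE_TASKS_NUM_LABELS.get("MNLI".lower())
assert num_labels == 3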
| 108 |
from numpy import exp, pi, sqrt
def gaussian ( x : Any , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
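# --- Editor's illustrative example (not from the original source) ---
# Sanity check: at x = mu the density above peaks at 1 / sqrt(2 * pi * sigma**2).
from numpy import pi, sqrt

peak = 1 / sqrt(2 * pi * 1.0**2)
assert abs(peak - 0.3989) < 1e-3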
| 219 | 0 |
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case__ = ["torch", "torchsde"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def a ( cls : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def a ( cls : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
requires_backends(cls , ["torch", "torchsde"] )
| 366 |
from statistics import mean, stdev
def normalization ( data : list , ndigits : int = 3 ):
"""simple docstring"""
x_min = min(data )
x_max = max(data )
# normalize data
return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization ( data : list , ndigits : int = 3 ):
"""simple docstring"""
mu = mean(data )
sigma = stdev(data )
# standardize data
return [round((x - mu) / (sigma) , ndigits ) for x in data]
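# --- Editor's illustrative example (not from the original source) ---
# A worked instance of the two rescalings above: min-max scaling maps the data onto
# [0, 1], while standardization centres it at mean 0 with unit sample standard deviation.
from statistics import mean, stdev

data = [2.0, 4.0, 6.0]
x_min, x_max = min(data), max(data)
assert [(x - x_min) / (x_max - x_min) for x in data] == [0.0, 0.5, 1.0]
mu, sigma = mean(data), stdev(data)  # mu = 4.0, sigma = 2.0
assert [(x - mu) / sigma for x in data] == [-1.0, 0.0, 1.0]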
| 221 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data ):
"""simple docstring"""
return (data["data"], data["target"])
def xgboost ( features , target ):
"""simple docstring"""
classifier = XGBClassifier()
classifier.fit(features , target )
return classifier
def main ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = load_iris()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = data_handling(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = train_test_split(
a__ , a__ , test_size=0.25 )
SCREAMING_SNAKE_CASE : int = iris['''target_names''']
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE : Dict = xgboost(a__ , a__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
a__ , a__ , a__ , display_labels=a__ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 313 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
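# Usage note (the test path is an assumption based on the standard diffusers repo
# layout): the fast tests above run on CPU, e.g.
#
#   python -m pytest -k "StableUnCLIPImg2Img" tests/pipelines/
#
# while the @slow/@require_torch_gpu class only runs when RUN_SLOW=1 is set and a
# CUDA device is available.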
| 313 | 1 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
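# A minimal usage sketch for the package API exported above (`model`, `optimizer`,
# and `dataloader` are placeholders you must construct yourself):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()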
| 198 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset to the binary file object as JSON, batch by batch."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
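# A brief usage sketch (the file name is a placeholder): these classes back the
# public `datasets` JSON helpers, so typical callers go through the Dataset API:
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_json("data.jsonl")                        # JsonDatasetWriter under the hood
#   ds_roundtrip = Dataset.from_json("data.jsonl")  # JsonDatasetReader under the hood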
| 198 | 1 |