"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
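# Note (added for illustration): with this lazy structure, importing the package
# stays cheap; the torch-dependent `modeling_swinv2` submodule is only imported
# the first time one of its attributes (e.g. `Swinv2Model`) is actually accessed.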
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = JukeboxTokenizer
__UpperCAmelCase : Union[str, Any] = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        """Tokenize self.metas with the 1b-lyrics checkpoint and compare against known-good token ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        """Same check as above, but for the 5b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
"""simple docstring"""
from math import pi, sqrt
def snake_case__ ( __lowerCamelCase : float ):
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
if num > 1_71.5:
raise OverflowError('''math range error''' )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(__lowerCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def snake_case__ ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(__lowerCamelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase : Union[str, Any] = 1.0
while num:
_lowercase : Optional[int] = float(input("Gamma of: "))
print(f'gamma({num}) = {gamma(num)}')
print("\nEnter 0 to exit...")
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =[0] * len(__lowerCamelCase )
lowerCamelCase__ : List[Any] =[]
lowerCamelCase__ : List[Any] =[1] * len(__lowerCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCamelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCamelCase )
while queue:
lowerCamelCase__ : Tuple =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase__ : Optional[Any] =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__lowerCamelCase )
print(max(__lowerCamelCase ) )
# Adjacency list of Graph
_lowercase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
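# For the adjacency list above this prints 5: the longest path visits the five
# vertices 0 -> 2 -> 5 -> 6 -> 7 (`long_dist` counts vertices, not edges).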
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content of the documentation by removing
    duplicates and sorting entries alphabetically, keeping "Overview" first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
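# Example invocation (script path hypothetical): `python utils/check_doc_toc.py`
# fails if the Schedulers/Pipelines sections are unsorted; pass
# `--fix_and_overwrite` to rewrite docs/source/en/_toctree.yml in place.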
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
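# Minimal round-trip demo (added for illustration; key and message are arbitrary).
# Note that decode(encode(m, k), k) returns the *prepared* text: upper-cased,
# non-letters stripped, and padded with "X" to an even length.
if __name__ == "__main__":
    key = "monarchy"
    message = "hide the gold"
    ciphertext = encode(message, key)
    print(f"Encoded: {ciphertext}")
    print(f"Decoded: {decode(ciphertext, key)}")  # HIDETHEGOLDX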
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
class __a ( snake_case__, snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , lowercase_ : int = 6_5536 , lowercase_ : Optional[int] = None , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 0 , lowercase_ : str = "fourier" , lowercase_ : bool = True , lowercase_ : bool = False , lowercase_ : float = 0.0 , lowercase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowercase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowercase_ : Tuple[str] = "UNetMidBlock1D" , lowercase_ : str = None , lowercase_ : Tuple[int] = (32, 32, 64) , lowercase_ : str = None , lowercase_ : int = 8 , lowercase_ : int = 1 , lowercase_ : bool = False , ):
super().__init__()
UpperCamelCase__ : int =sample_size
# time
if time_embedding_type == "fourier":
UpperCamelCase__ : Dict =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowercase_ , log=lowercase_ , flip_sin_to_cos=lowercase_ )
UpperCamelCase__ : List[Any] =2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCamelCase__ : Dict =Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowercase_ , downscale_freq_shift=lowercase_ )
UpperCamelCase__ : List[str] =block_out_channels[0]
if use_timestep_embedding:
UpperCamelCase__ : Union[str, Any] =block_out_channels[0] * 4
UpperCamelCase__ : Optional[int] =TimestepEmbedding(
in_channels=lowercase_ , time_embed_dim=lowercase_ , act_fn=lowercase_ , out_dim=block_out_channels[0] , )
UpperCamelCase__ : Optional[int] =nn.ModuleList([] )
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : str =nn.ModuleList([] )
UpperCamelCase__ : Tuple =None
# down
UpperCamelCase__ : Dict =in_channels
for i, down_block_type in enumerate(lowercase_ ):
UpperCamelCase__ : Optional[Any] =output_channel
UpperCamelCase__ : Dict =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCamelCase__ : str =i == len(lowercase_ ) - 1
UpperCamelCase__ : Any =get_down_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowercase_ )
# mid
UpperCamelCase__ : Tuple =get_mid_block(
lowercase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowercase_ , add_downsample=lowercase_ , )
# up
UpperCamelCase__ : Optional[Any] =list(reversed(lowercase_ ) )
UpperCamelCase__ : str =reversed_block_out_channels[0]
if out_block_type is None:
UpperCamelCase__ : Union[str, Any] =out_channels
else:
UpperCamelCase__ : List[Any] =block_out_channels[0]
for i, up_block_type in enumerate(lowercase_ ):
UpperCamelCase__ : Any =output_channel
UpperCamelCase__ : Dict =(
reversed_block_out_channels[i + 1] if i < len(lowercase_ ) - 1 else final_upsample_channels
)
UpperCamelCase__ : Union[str, Any] =i == len(lowercase_ ) - 1
UpperCamelCase__ : Tuple =get_up_block(
lowercase_ , num_layers=lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowercase_ )
UpperCamelCase__ : int =output_channel
# out
UpperCamelCase__ : Optional[Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
UpperCamelCase__ : Optional[int] =get_out_block(
out_block_type=lowercase_ , num_groups_out=lowercase_ , embed_dim=block_out_channels[0] , out_channels=lowercase_ , act_fn=lowercase_ , fc_dim=block_out_channels[-1] // 4 , )
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : torch.FloatTensor , lowercase_ : Union[torch.Tensor, float, int] , lowercase_ : bool = True , ):
UpperCamelCase__ : List[Any] =timestep
if not torch.is_tensor(lowercase_ ):
UpperCamelCase__ : Dict =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
UpperCamelCase__ : Union[str, Any] =timesteps[None].to(sample.device )
UpperCamelCase__ : Optional[Any] =self.time_proj(lowercase_ )
if self.config.use_timestep_embedding:
UpperCamelCase__ : List[Any] =self.time_mlp(lowercase_ )
else:
UpperCamelCase__ : List[Any] =timestep_embed[..., None]
UpperCamelCase__ : Any =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCamelCase__ : List[str] =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCamelCase__ : Tuple =()
for downsample_block in self.down_blocks:
UpperCamelCase__ , UpperCamelCase__ : List[str] =downsample_block(hidden_states=lowercase_ , temb=lowercase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCamelCase__ : Dict =self.mid_block(lowercase_ , lowercase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCamelCase__ : Dict =down_block_res_samples[-1:]
UpperCamelCase__ : Tuple =down_block_res_samples[:-1]
UpperCamelCase__ : str =upsample_block(lowercase_ , res_hidden_states_tuple=lowercase_ , temb=lowercase_ )
# 5. post-process
if self.out_block:
UpperCamelCase__ : List[Any] =self.out_block(lowercase_ , lowercase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowercase_ )
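# Shape sketch (illustrative; values are assumptions and require `diffusers` + torch):
#
#     import torch
#     from diffusers import UNet1DModel
#
#     model = UNet1DModel()                      # defaults as in __init__ above
#     sample = torch.randn(1, 2, 65536)          # (batch, in_channels, sample_size)
#     out = model(sample, timestep=10).sample    # same (batch, channels, length) layout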
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_SCREAMING_SNAKE_CASE : List[Any] = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
_SCREAMING_SNAKE_CASE : Tuple = """|""".join(sys.argv[1:])
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
_SCREAMING_SNAKE_CASE : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """mobilenet_v1"""
def __init__( self , lowercase=3 , lowercase=224 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.9_99 , lowercase=0.02 , lowercase=0.0_01 , **lowercase , ):
super().__init__(**lowercase )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : Any = image_size
_lowerCamelCase : str = depth_multiplier
_lowerCamelCase : Dict = min_depth
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Union[str, Any] = tf_padding
_lowerCamelCase : str = classifier_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : int = layer_norm_eps
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = version.parse("""1.11""" )
@property
def A_ ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def A_ ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def A_ ( self ):
return 1E-4 | 96 |
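# Illustrative usage (sketch; assumes the `transformers` package is installed):
#
#     from transformers import MobileNetV1Config
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     print(config.hidden_act)  # "relu6"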
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(SCREAMING_SNAKE_CASE ):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
lowerCAmelCase = PipelineDataFormat.from_str(
format=SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCAmelCase = nlp
lowerCAmelCase = reader
@staticmethod
def _snake_case ( lowercase ) -> Optional[int]:
lowerCAmelCase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowercase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowercase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowercase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowercase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowercase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowercase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowercase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowercase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase , lowerCAmelCase = self._nlp, []
for entry in self._reader:
lowerCAmelCase = nlp(**lowercase ) if self._reader.is_multi_columns else nlp(lowercase )
if isinstance(lowercase , lowercase ):
outputs.append(lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase = self._reader.save_binary(lowercase )
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(lowercase )
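# Example invocation (sketch; the task and file names are placeholders):
#   transformers-cli run --task sentiment-analysis --input data.csv --output out.csv --column text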
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
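# To run just this test module (path hypothetical, mirroring the transformers test layout):
#   python -m pytest tests/models/longformer/test_tokenization_longformer.py -q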
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    """Generic ImageNet-style image processor: resize, center-crop, rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
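# Minimal usage sketch (the class name above is a reconstruction; requires Pillow + numpy):
#
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)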
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )

def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]

def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()

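# make_precision_recall_eval sweeps a no-answer threshold over all questions
# (sorted by na_probs) and accumulates average precision as the area under the
# resulting precision-recall curve.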
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}

def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")

def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh

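# find_best_thresh starts from the score obtained when every question is
# predicted unanswerable, then walks through questions in order of increasing
# no-answer probability, updating the score as each prediction flips to
# "answered" and remembering the threshold that maximizes it.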
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh

def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))

if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
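# Hypothetical invocation (file names are placeholders; the flags assume the
# parse_args() definition earlier in this script follows the reference SQuAD
# 2.0 evaluator):
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-image-dir pr_curves/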
| 270 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
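# Both helpers above hit the live zenquotes.io API, so they need network
# access; each returns the decoded JSON payload, which this API serves as a
# list of quote objects (hence the `-> list` annotations).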
| 270 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility that dumps one batch as text and token ids."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

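    # When --label_smoothing > 0, the plain CrossEntropyLoss above is replaced
    # by label_smoothed_nll_loss over log-probabilities, which moves a small
    # amount of probability mass from the target token to all other tokens.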
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

def __SCREAMING_SNAKE_CASE ( self : int , __a : Union[str, Any] , __a : int="val" ) -> Dict:
self.step_count += 1
_UpperCamelCase : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCamelCase : str = losses["loss"]
_UpperCamelCase : Dict = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_UpperCamelCase : Any = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCamelCase : torch.FloatTensor = torch.tensor(__a ).type_as(__a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__a )
_UpperCamelCase : List[str] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
_UpperCamelCase : List[str] = self.step_count
self.metrics[prefix].append(__a ) # callback writes this to self.metrics_save_path
_UpperCamelCase : Union[str, Any] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser

class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)

def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model

if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
lowerCamelCase__ = pl.Trainer.add_argparse_args(parser)
lowerCamelCase__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
main(args)
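# Hypothetical invocation (paths and hyperparameter values below are
# illustrative placeholders, not values taken from this script; generic flags
# such as --model_name_or_path are assumed to come from add_generic_args /
# BaseTransformer):
#   python finetune.py --data_dir ./cnn_dm --output_dir ./sum_out \
#       --model_name_or_path t5-small --gpus 1 --do_predict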
| 310 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 310 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowercase : Any = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_lowercase : List[Any] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_lowercase : Optional[Any] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())

def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }

def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 93 |
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal: returns the size of the subtree rooted at `start` and
    records in `cuts` every vertex whose subtree has an even number of nodes."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret

def even_tree():
    """For the sample tree below, removing edges (1, 3) and (1, 6) leaves every
    component with an even number of vertices, giving the answer 2."""
    dfs(1)

if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
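# For the sample edge list above, dfs() records vertices 3, 6 and 1 (subtree
# sizes 2, 4 and 10), so the program prints len(cuts) - 1 == 2: two edges can
# be cut while keeping every resulting component at an even size.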
| 18 | 0 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass of adjacent swaps, then recurse on the
    prefix that may still be unsorted.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)

if __name__ == "__main__":
import doctest
doctest.testmod()
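    # Note: this recursive variant performs the same O(n^2) worst-case
    # comparisons as iterative bubble sort, and its recursion depth can reach
    # len(list_data), so very large inputs may hit Python's recursion limit.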
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 255 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Dump the current git commit info next to the training artifacts."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)

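# The function below derives the distributed topology from environment
# variables (WORLD_SIZE, N_GPU_NODE, RANK, N_NODES, NODE_RANK) that a launcher
# such as torch.distributed.launch is expected to export.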
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )

def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 187 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language

class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 176 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase_ : Optional[Any] = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
UpperCAmelCase_ : Tuple = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
UpperCAmelCase_ : Optional[Any] = r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing both sides."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
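# Usage sketch (mirrors the docstring example above; assumes the optional
# math_equivalence dependency from https://github.com/hendrycks/math is
# installed):
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # {'accuracy': 1.0}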
| 359 |
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its
        following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)

def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence and
    returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head

def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)

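# print_reverse reaches the tail first and prints on the way back out, so it
# uses O(n) call-stack depth; an explicit stack or an iterative reversal would
# avoid recursion-limit issues on very long lists.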
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)

if __name__ == "__main__":
main()
| 198 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
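# Pix2StructConfig is a composition config: the nested text_config and
# vision_config are re-serialized by to_dict() above so that
# save_pretrained()/from_pretrained() round-trips the full configuration.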
| 119 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , UpperCamelCase__ , revision="ahaha" )
snake_case : int = get_file_from_repo("bert-base-cased" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case : str = json.loads(open(UpperCamelCase__ , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : int = Path(UpperCamelCase__ ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , "a.txt" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , "b.txt" ) )
| 203 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
_lowerCAmelCase = DatasetInfosDict.from_directory(snake_case_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def __UpperCAmelCase ( snake_case_ : Optional[int] , snake_case_ : DatasetInfo ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = str(snake_case_ )
dataset_info.write_to_directory(snake_case_ )
_lowerCAmelCase = DatasetInfo.from_directory(snake_case_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(snake_case_ , """dataset_info.json""" ) )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_lowerCAmelCase = dataset_info._to_yaml_dict()
assert sorted(snake_case_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_lowerCAmelCase = yaml.safe_dump(snake_case_ )
_lowerCAmelCase = yaml.safe_load(snake_case_ )
assert dataset_info_yaml_dict == reloaded
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = DatasetInfo()
_lowerCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = str(snake_case_ )
dataset_infos_dict.write_to_directory(snake_case_ )
_lowerCAmelCase = DatasetInfosDict.from_directory(snake_case_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(snake_case_ , """README.md""" ) ) | 317 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
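# Usage sketch (hedged, based on how `_LazyModule` registrations behave in
# transformers): importing a name from this package is cheap until first use,
# e.g.
#
#   from transformers import FocalNetConfig   # no torch import yet
#   config = FocalNetConfig()                 # triggers the real module import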
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 317 | 1 |
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of 32 bits to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to hex, in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string and pads it per the MD5 spec."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Flips every bit of the given non-negative 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a non-negative 32-bit integer left by the given shift."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Returns the 32-char hex MD5 digest of the given message, as bytes."""
    # Convert the message to a padded bit string
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round left-rotation amounts
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
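# Example usage (a quick sketch; the expected value is the standard MD5 test
# vector for the empty message):
#
#     >>> md5_me(b"")
#     b'd41d8cd98f00b204e9800998ecf8427e'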
| 38 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
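# Example invocation (a sketch; the script name and checkpoint paths below are
# placeholders, not files shipped with this repo):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted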
| 38 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
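# Example usage (a minimal sketch; assumes an active SparkSession named `spark`
# and that this builder is wired up behind `datasets.Dataset.from_spark`, as the
# error message in `_validate_cache_dir` suggests):
#
#   df = spark.range(100).toDF("value")
#   ds = datasets.Dataset.from_spark(df)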
| 120 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
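# For reference: "gelu_10" is a clipped GELU, which is what the mask comparison
# in `test_gelu_10` relies on. Plain gelu(100.0) is ~100.0, but (a sketch):
#
#   get_activation("gelu_10")(torch.tensor([100.0]))  # -> tensor([10.])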
| 120 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__SCREAMING_SNAKE_CASE : Any = """bert-base-cased"""
__SCREAMING_SNAKE_CASE : Any = """fp16"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """bf16"""
__SCREAMING_SNAKE_CASE : Dict = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Any = F"""{i + 1}"""
_UpperCAmelCase : str = strategy
with mockenv_context(**A ):
_UpperCAmelCase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
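    # For reference (a sketch based on torch.distributed.fsdp.ShardingStrategy):
    # ShardingStrategy(1) == FULL_SHARD, ShardingStrategy(2) == SHARD_GRAD_OP and
    # ShardingStrategy(3) == NO_SHARD, which is why the loop above maps index i to i + 1.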
    def test_backward_prefetch(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Optional[Any] = prefetch_policy
with mockenv_context(**A ):
_UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
    def test_state_dict_type(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
_UpperCAmelCase : Optional[int] = self.dist_env.copy()
_UpperCAmelCase : Tuple = state_dict_type
with mockenv_context(**A ):
_UpperCAmelCase : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
for policy in FSDP_AUTO_WRAP_POLICY:
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_UpperCAmelCase : Optional[Any] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_UpperCAmelCase : Any = "2000"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Tuple = "TRANSFORMER_BASED_WRAP"
_UpperCAmelCase : Tuple = "T5Layer"
with mockenv_context(**A ):
_UpperCAmelCase : Dict = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = "SIZE_BASED_WRAP"
_UpperCAmelCase : List[str] = "0"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : str = mp_dtype
with mockenv_context(**A ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : Dict = str(A ).lower()
with mockenv_context(**A ):
_UpperCAmelCase : Tuple = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 31 |
def binomial_coefficient(n: int, k: int) -> int:
    """Returns the binomial coefficient C(n, k)."""
    result = 1  # to keep track of the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Returns the number of possible binary search trees for the given node count."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Returns the factorial of n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Returns the number of possible binary trees for the given node count."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
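# Worked example: for node_count = 3, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5
# distinct binary search trees, and binary_tree_count(3) = 5 * 3! = 30 binary trees.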
| 31 | 1 |
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz from `number` up to `iterations`, inclusive.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
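# Another example, covering the combined case (both branches append, so a
# multiple of 15 yields "FizzBuzz", and every entry is followed by a space):
#
#     >>> fizz_buzz(10, 15)
#     'Buzz 11 Fizz 13 14 FizzBuzz '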
| 319 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding

        elif text is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
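# Example usage (a sketch; "laion/clap-htsat-unfused" is assumed to be a public
# CLAP checkpoint and `waveform` a 48 kHz mono numpy array):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")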
| 319 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = """markuplm"""
def __init__( self , lowerCAmelCase__=30_522 , lowerCAmelCase__=768 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=3_072 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1e-12 , lowerCAmelCase__=0 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=256 , lowerCAmelCase__=1_024 , lowerCAmelCase__=216 , lowerCAmelCase__=1_001 , lowerCAmelCase__=32 , lowerCAmelCase__=50 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> List[Any]:
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE = max_depth
SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE = tag_pad_id
SCREAMING_SNAKE_CASE = subs_pad_id
SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
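# Example (a sketch): the defaults above describe a BERT-base-sized encoder
# plus the XPath embedding tables, so
#
#   config = MarkupLMConfig()
#   assert config.hidden_size == 768 and config.max_depth == 50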
| 113 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")

    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
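# Worked example (traced through the branches above): "net.3.main.1.main.0.weight"
# enters one "net.3." level (depth 1), layer "1" maps to resnets.0 in down_blocks.1,
# and "main.0" maps to "conv_1", so rename(...) returns
# "down_blocks.1.resnets.0.conv_1.weight".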
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
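
# Illustration of the Conv -> Linear attention transform above: a fused qkv
# Conv1d weight of shape (3*C, C, 1) is sliced into three (C, C) matrices.
# The key names below are hypothetical and only demonstrate the slicing;
# torch is imported at the top of this script.
_demo_state_dict = {}
_demo_qkv = torch.randn(12, 4, 1)  # 3*C x C x kernel, with C = 4
transform_conv_attns(_demo_state_dict, ["to_q.weight", "to_k.weight", "to_v.weight"], _demo_qkv)
assert _demo_state_dict["to_q.weight"].shape == (4, 4)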
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
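# Example invocation (the script name and paths are placeholders):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k.ckpt --checkpoint_path ./gwf-440k-diffusers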
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story file into article lines and summary lines, using the
    "@highlight" markers that precede each summary line."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt a sequence to the block size: truncate if longer, right-pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings alternate between 0 and 1 at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
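
# A tiny end-to-end check of the helpers above on a synthetic story string;
# "@highlight" separates the article from its summary lines.
_raw_story = "First sentence\nSecond sentence\n@highlight\nThe summary"
_story, _summary = process_story(_raw_story)
assert _story == ["First sentence.", "Second sentence."]
assert _summary == ["The summary."]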
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the initial outline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
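    # Sanity check: each iteration turns every segment into four, so after k
    # steps the outline has 3 * 4**k segments plus one closing point.
    assert len(processed_vectors) == 3 * 4**5 + 1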
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
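
# To run this file from a diffusers checkout (path is indicative; the @slow
# tests additionally require RUN_SLOW=1 and a GPU):
#   pytest tests/models/test_models_unet_2d.py -q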
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
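
# Usage sketch: this builder backs `load_dataset("pandas", ...)`, which reads
# pickled pandas DataFrames; the file path below is a placeholder.
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "data/train.pkl"})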
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by writing each numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
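    # Round-trip sanity check: sixteen unit strokes minimise to "XVI".
    assert generate_roman_numerals(parse_roman_numerals("IIIIIIIIIIIIIIII")) == "XVI"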
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
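
# Usage sketch of the re-exports above (illustrative only):
#   from accelerate.utils import is_bf16_available, set_seed
#   set_seed(42)
#   mixed_precision = "bf16" if is_bf16_available() else "fp16"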
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
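
# Minimal usage sketch of HfArgumentParser outside the test harness, reusing
# the BasicExample dataclass defined above:
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux"]
#   )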
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
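# Typical CLI usage once accelerate is installed (the file path is a placeholder):
#   accelerate config --config_file ./accelerate_config.yaml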
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
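    # The four non-trivial digit-cancelling fractions are 16/64, 19/95,
    # 26/65 and 49/98; their product is 1/100, so solution() returns 100.
    assert solution() == 100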
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
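
# Minimal usage sketch of the pipeline exercised above (same model id as the
# integration tests; requires a GPU for reasonable speed):
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting"
#   ).to("cuda")
#   result = pipe(prompt, image=init_image, mask_image=mask_image).images[0]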
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-4] , '''œ''')
self.assertEqual(vocab_keys[-2] , '''<mask>''')
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''')
self.assertEqual(len(__a) , 81)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizers(do_lower_case=__a)
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}'''):
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(__a)
self.assertNotEqual(__a , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_UpperCamelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
_UpperCamelCase = tokenizer.add_tokens(__a)
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(__a)
self.assertNotEqual(__a , 0)
self.assertEqual(__a , __a)
self.assertEqual(__a , len(__a))
self.assertEqual(__a , all_size + len(__a))
_UpperCamelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__a)
self.assertGreaterEqual(len(__a) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
_UpperCamelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
_UpperCamelCase = tokenizer.add_special_tokens(__a)
_UpperCamelCase = tokenizer.vocab_size
_UpperCamelCase = len(__a)
self.assertNotEqual(__a , 0)
self.assertEqual(__a , __a)
self.assertEqual(__a , len(__a))
self.assertEqual(__a , all_size_a + len(__a))
_UpperCamelCase = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__a)
self.assertGreaterEqual(len(__a) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = tokenizer.tokenize('''This is a test''')
# fmt: off
self.assertListEqual(__a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
_UpperCamelCase = tokenizer.convert_tokens_to_ids(__a)
# fmt: off
self.assertListEqual(__a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
_UpperCamelCase = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
# Use custom sequence because this tokenizer does not handle numbers.
_UpperCamelCase = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
_UpperCamelCase = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__a , )
| 194 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=5 ) -> Union[str, Any]:
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
_UpperCamelCase = torch.tensor(tokenizer.encode(__snake_case, add_special_tokens=__snake_case ) ).unsqueeze(0 ) # Batch size 1
_UpperCamelCase = model(__snake_case )[0] # The last hidden-state is the first element of the output tuple
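# locate the position of the <mask> token in the encoded input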
_UpperCamelCase = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_UpperCamelCase = logits[0, masked_index, :]
_UpperCamelCase = logits.softmax(dim=0 )
_UpperCamelCase , _UpperCamelCase = prob.topk(k=__snake_case, dim=0 )
_UpperCamelCase = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__snake_case ) )] )
_UpperCamelCase = tokenizer.mask_token
_UpperCamelCase = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
_UpperCamelCase = predicted_token_bpe.replace('''\u2581''', ''' ''' )
if " {0}".format(__snake_case ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(__snake_case ), __snake_case ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__snake_case, __snake_case ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_a = CamembertTokenizer.from_pretrained("""camembert-base""")
_a = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
_a = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 194 | 1 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1_024 ):
_UpperCAmelCase , _UpperCAmelCase : int = [], []
_UpperCAmelCase : Union[str, Any] = list(zip(__lowerCAmelCase , __lowerCAmelCase ) )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = sorted_examples[0]
def is_too_big(__lowerCAmelCase ):
return tok(__lowerCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
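# greedily merge consecutive src/tgt pairs until either side would exceed max_tokens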
for src, tgt in tqdm(sorted_examples[1:] ):
_UpperCAmelCase : Optional[Any] = new_src + " " + src
_UpperCAmelCase : Optional[int] = new_tgt + " " + tgt
if is_too_big(__lowerCAmelCase ) or is_too_big(__lowerCAmelCase ): # can't fit, finalize the current example
finished_src.append(__lowerCAmelCase )
finished_tgt.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : str = src, tgt
else: # can fit, keep adding
_UpperCAmelCase , _UpperCAmelCase : Any = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__lowerCAmelCase )
finished_tgt.append(__lowerCAmelCase )
return finished_src, finished_tgt
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = Path(__lowerCAmelCase )
save_path.mkdir(exist_ok=__lowerCAmelCase )
for split in ["train"]:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
_UpperCAmelCase : List[Any] = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()]
_UpperCAmelCase : Optional[int] = [x.rstrip() for x in Path(__lowerCAmelCase ).open().readlines()]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = pack_examples(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
print(F"""packed {split} split from {len(__lowerCAmelCase )} examples -> {len(__lowerCAmelCase )}.""" )
Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(__lowerCAmelCase ) )
Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(__lowerCAmelCase ) )
for split in ["val", "test"]:
_UpperCAmelCase , _UpperCAmelCase : Dict = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(__lowerCAmelCase , save_path / F"""{split}.source""" )
shutil.copyfile(__lowerCAmelCase , save_path / F"""{split}.target""" )
def __lowerCAmelCase ():
_UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__lowerCAmelCase , default=128 )
parser.add_argument("--data_dir" , type=__lowerCAmelCase )
parser.add_argument("--save_path" , type=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = parser.parse_args()
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__lowerCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 322 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
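# squeeze-and-excitation: rescale the feature map by the learned per-channel attention weights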
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
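# residual connection: add the shortcut branch back onto the transformed features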
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase : Optional[Any] = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
# Change to NCHW output format to have uniformity across the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = RegNetConfig
lowerCAmelCase : Tuple = "regnet"
lowerCAmelCase : Union[str, Any] = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
| 322 | 1 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : Dict = "AutoImageProcessor"
SCREAMING_SNAKE_CASE_ : Optional[int] = "AutoTokenizer"
def __init__( self : Optional[Any] , A : Any=None , A : List[Any]=None , **A : List[str] ) -> Optional[int]:
lowercase_ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A , )
lowercase_ : Optional[Any] = kwargs.pop('''feature_extractor''' )
lowercase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A , A )
lowercase_ : List[Any] = self.image_processor
lowercase_ : int = False
def __call__( self : Tuple , *A : Optional[Any] , **A : List[str] ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A , **A )
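# images and text may be passed either as keyword arguments or positionally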
lowercase_ : int = kwargs.pop('''images''' , A )
lowercase_ : List[str] = kwargs.pop('''text''' , A )
if len(A ) > 0:
lowercase_ : List[Any] = args[0]
lowercase_ : Optional[int] = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowercase_ : str = self.image_processor(A , *A , **A )
if text is not None:
lowercase_ : Dict = self.tokenizer(A , **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase_ : int = encodings['''input_ids''']
return inputs
def A ( self : int , *A : Tuple , **A : Optional[Any] ) -> List[str]:
return self.tokenizer.batch_decode(*A , **A )
def A ( self : Any , *A : List[str] , **A : Optional[Any] ) -> Optional[Any]:
return self.tokenizer.decode(*A , **A )
@contextmanager
def A ( self : Dict ) -> Union[str, Any]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your image inputs, or in a separate call).''' )
lowercase_ : Union[str, Any] = True
lowercase_ : Optional[int] = self.tokenizer
yield
lowercase_ : Dict = self.image_processor
lowercase_ : List[Any] = False
def A ( self : Any , A : Optional[Any] , A : Optional[int]=False , A : Any=None ) -> Dict:
if added_vocab is None:
lowercase_ : Tuple = self.tokenizer.get_added_vocab()
lowercase_ : Optional[int] = {}
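# scan the token string, peeling off <s_key> ... </s_key> spans and recursing into nested values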
while tokens:
lowercase_ : int = re.search(R'''<s_(.*?)>''' , A , re.IGNORECASE )
if start_token is None:
break
lowercase_ : Tuple = start_token.group(1 )
lowercase_ : Optional[Any] = re.search(RF'''</s_{key}>''' , A , re.IGNORECASE )
lowercase_ : int = start_token.group()
if end_token is None:
lowercase_ : int = tokens.replace(A , '''''' )
else:
lowercase_ : Any = end_token.group()
lowercase_ : int = re.escape(A )
lowercase_ : Tuple = re.escape(A )
lowercase_ : Optional[int] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , A , re.IGNORECASE )
if content is not None:
lowercase_ : int = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase_ : List[Any] = self.tokenajson(A , is_inner_value=A , added_vocab=A )
if value:
if len(A ) == 1:
lowercase_ : Optional[int] = value[0]
lowercase_ : str = value
else: # leaf nodes
lowercase_ : int = []
for leaf in content.split(R'''<sep/>''' ):
lowercase_ : Optional[int] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase_ : List[Any] = leaf[1:-2] # for categorical special tokens
output[key].append(A )
if len(output[key] ) == 1:
lowercase_ : Dict = output[key][0]
lowercase_ : int = tokens[tokens.find(A ) + len(A ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=A , added_vocab=A )
if len(A ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A ( self : Any ) -> str:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , )
return self.image_processor_class
@property
def A ( self : Optional[int] ) -> Union[str, Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , )
return self.image_processor
| 33 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase):
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''google/mt5-small''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ).loss
SCREAMING_SNAKE_CASE : int = -tf.math.reduce_mean(UpperCamelCase__ ).numpy()
SCREAMING_SNAKE_CASE : List[str] = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 182 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
lowerCAmelCase__ :Optional[int] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCAmelCase__ :Any = token_dict['token']
lowerCAmelCase__ :int = Tokenizer(Unigram() )
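# normalization pipeline: NMT cleanup, NFKC, collapse repeated spaces, lowercase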
lowerCAmelCase__ :Tuple = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
lowerCAmelCase__ :Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=__UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
lowerCAmelCase__ :List[str] = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
lowerCAmelCase__ :Optional[int] = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :int = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :int = [files]
self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = trainers.UnigramTrainer(
vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , )
self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase )
self.add_unk_id()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = json.loads(self._tokenizer.to_str() )
lowerCAmelCase__ :List[str] = self.special_tokens['unk']['id']
lowerCAmelCase__ :Union[str, Any] = Tokenizer.from_str(json.dumps(__UpperCAmelCase ) )
| 369 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( a , a ):
"""simple docstring"""
__magic_name__ :int = """swin"""
__magic_name__ :Tuple = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __UpperCAmelCase=2_2_4 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=9_6 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 1_2, 2_4] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=3_2 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :Any = image_size
lowerCAmelCase__ :List[Any] = patch_size
lowerCAmelCase__ :Optional[int] = num_channels
lowerCAmelCase__ :str = embed_dim
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :List[str] = len(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = num_heads
lowerCAmelCase__ :List[Any] = window_size
lowerCAmelCase__ :List[Any] = mlp_ratio
lowerCAmelCase__ :int = qkv_bias
lowerCAmelCase__ :Optional[int] = hidden_dropout_prob
lowerCAmelCase__ :int = attention_probs_dropout_prob
lowerCAmelCase__ :List[Any] = drop_path_rate
lowerCAmelCase__ :Any = hidden_act
lowerCAmelCase__ :Dict = use_absolute_embeddings
lowerCAmelCase__ :int = layer_norm_eps
lowerCAmelCase__ :Dict = initializer_range
lowerCAmelCase__ :int = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase__ :str = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
lowerCAmelCase__ :str = ['stem'] + [F"stage{idx}" for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :int = version.parse("""1.11""" )
@property
def snake_case ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case ( self ):
'''simple docstring'''
return 1E-4
| 254 | 0 |
"""simple docstring"""
def __magic_name__ ( lowercase ):
if n_term == "":
return []
SCREAMING_SNAKE_CASE_: list =[]
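# build the series term by term: "1", "1/2", "1/3", ..., "1/n"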
for temp in range(int(lowercase ) ):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
_UpperCAmelCase = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 173 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __magic_name__ ( lowercase ):
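# element-wise ReLU: max(0, x)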
return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 173 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Dict , a :Any , a :int=1_3 , a :List[str]=7 , a :List[str]=True , a :Any=True , a :Optional[Any]=False , a :Optional[int]=True , a :str=9_9 , a :Dict=3_2 , a :Optional[int]=5 , a :int=4 , a :str=3_7 , a :int="gelu" , a :Any=0.1 , a :Optional[int]=0.1 , a :Optional[int]=5_1_2 , a :Tuple=1_6 , a :Any=2 , a :str=0.02 , a :Optional[Any]=3 , a :Tuple=4 , a :Optional[Any]=None , ) -> Tuple:
__UpperCamelCase : List[Any] = parent
__UpperCamelCase : List[Any] = batch_size
__UpperCamelCase : int = seq_length
__UpperCamelCase : Union[str, Any] = is_training
__UpperCamelCase : Union[str, Any] = use_input_mask
__UpperCamelCase : Optional[int] = use_token_type_ids
__UpperCamelCase : str = use_labels
__UpperCamelCase : int = vocab_size
__UpperCamelCase : Optional[int] = hidden_size
__UpperCamelCase : Optional[Any] = num_hidden_layers
__UpperCamelCase : Dict = num_attention_heads
__UpperCamelCase : Union[str, Any] = intermediate_size
__UpperCamelCase : Union[str, Any] = hidden_act
__UpperCamelCase : Optional[int] = hidden_dropout_prob
__UpperCamelCase : Any = attention_probs_dropout_prob
__UpperCamelCase : Union[str, Any] = max_position_embeddings
__UpperCamelCase : Any = type_vocab_size
__UpperCamelCase : Dict = type_sequence_label_size
__UpperCamelCase : Tuple = initializer_range
__UpperCamelCase : Optional[int] = num_labels
__UpperCamelCase : str = num_choices
__UpperCamelCase : Optional[int] = scope
def _lowerCamelCase ( self :List[Any] ) -> List[Any]:
__UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : List[str] = None
if self.use_token_type_ids:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : str = None
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Tuple = None
if self.use_labels:
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self :List[str] ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self :Optional[Any] , a :Optional[int] , a :Dict , a :Optional[int] , a :Tuple , a :Any , a :Optional[Any] , a :List[str] ) -> Tuple:
__UpperCamelCase : Optional[Any] = LlamaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ )
__UpperCamelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self :Any , a :Optional[Any] , a :Any , a :List[str] , a :Any , a :List[Any] , a :Union[str, Any] , a :str , a :List[str] , a :int , ) -> Union[str, Any]:
__UpperCamelCase : Dict = True
__UpperCamelCase : Optional[int] = LlamaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : Dict = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
__UpperCamelCase : List[str] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
__UpperCamelCase : int = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self :List[str] , a :List[str] , a :Optional[Any] , a :Tuple , a :Tuple , a :int , a :List[str] , a :Tuple , a :Tuple , a :Optional[Any] , ) -> Optional[Any]:
__UpperCamelCase : Tuple = LlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : Dict = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self :str , a :str , a :Any , a :List[Any] , a :Union[str, Any] , a :Tuple , a :List[Any] , a :Union[str, Any] , a :int , a :Any , ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Tuple = LlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
__UpperCamelCase : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
__UpperCamelCase : str = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase : str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__UpperCamelCase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
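# forward on the full sequence vs. on only the new tokens with cached key/values; the hidden states must match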
__UpperCamelCase : Dict = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )["""hidden_states"""][0]
__UpperCamelCase : List[str] = model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )["""hidden_states"""][0]
# select random slice
__UpperCamelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def _lowerCamelCase ( self :Tuple ) -> Tuple:
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase : Tuple = config_and_inputs
__UpperCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( _a , _a , _a , unittest.TestCase):
'''simple docstring'''
_A = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_A = (LlamaForCausalLM,) if is_torch_available() else ()
_A = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A = False
_A = False
def _lowerCamelCase ( self :List[Any] ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] = LlamaModelTester(self )
__UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def _lowerCamelCase ( self :Any ) -> int:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self :Union[str, Any] ) -> str:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _lowerCamelCase ( self :List[Any] ) -> int:
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase : List[str] = type
self.model_tester.create_and_check_model(*snake_case_ )
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : List[Any] = input_dict["""input_ids"""]
__UpperCamelCase : Optional[int] = input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : List[Any] = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : int = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self :List[str] ) -> Tuple:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Dict = 3
__UpperCamelCase : List[Any] = """single_label_classification"""
__UpperCamelCase : str = input_dict["""input_ids"""]
__UpperCamelCase : Optional[Any] = input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase : Union[str, Any] = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[Any] = """multi_label_classification"""
__UpperCamelCase : List[str] = input_dict["""input_ids"""]
__UpperCamelCase : List[Any] = input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase : Any = LlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase : List[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def _lowerCamelCase ( self :Dict ) -> Tuple:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _lowerCamelCase ( self :List[str] , a :str ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : List[str] = ids_tensor([1, 1_0] , config.vocab_size )
__UpperCamelCase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
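        # a 10-token short input plus a long input at 1.5x max_position_embeddings, to exercise the scaled RoPE path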
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Union[str, Any] = LlamaModel(snake_case_ )
original_model.to(snake_case_ )
original_model.eval()
__UpperCamelCase : Union[str, Any] = original_model(snake_case_ ).last_hidden_state
__UpperCamelCase : str = original_model(snake_case_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__UpperCamelCase : Any = {"""type""": scaling_type, """factor""": 10.0}
__UpperCamelCase : Optional[Any] = LlamaModel(snake_case_ )
scaled_model.to(snake_case_ )
scaled_model.eval()
__UpperCamelCase : Optional[int] = scaled_model(snake_case_ ).last_hidden_state
__UpperCamelCase : str = scaled_model(snake_case_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def _lowerCamelCase ( self :Optional[Any] ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__UpperCamelCase : Tuple = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCamelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCamelCase : int = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Optional[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def _lowerCamelCase ( self :str ) -> List[Any]:
__UpperCamelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCamelCase : int = model(torch.tensor(snake_case_ ) )
# Expected mean on dim = -1
__UpperCamelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Dict = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
@slow
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
__UpperCamelCase : List[str] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__UpperCamelCase : Optional[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCamelCase : Optional[Any] = model(torch.tensor(snake_case_ ) )
# Expected mean on dim = -1
__UpperCamelCase : Any = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCamelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
    torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test" )
@slow
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
__UpperCamelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__UpperCamelCase : Dict = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCamelCase : List[str] = model(torch.tensor(snake_case_ ) )
__UpperCamelCase : Optional[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCamelCase : int = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , snake_case_ , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Model is currently gated" )
@slow
def _lowerCamelCase ( self :Dict ) -> int:
__UpperCamelCase : Tuple = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
__UpperCamelCase : Dict = """Simply put, the theory of relativity states that """
__UpperCamelCase : List[Any] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCamelCase : Tuple = tokenizer.encode(snake_case_ , return_tensors="pt" )
__UpperCamelCase : Any = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=snake_case_ )
# greedy generation outputs
__UpperCamelCase : Optional[Any] = model.generate(snake_case_ , max_new_tokens=6_4 , top_p=snake_case_ , temperature=1 , do_sample=snake_case_ )
__UpperCamelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ ) | 371 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """An edge of the graph; its weight is restricted to 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph structure used for the 0-1 BFS shortest-path algorithm."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
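# Example usage, assuming the classes above (a hypothetical 3-vertex graph):
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)  # zero-weight edge, explored first
#     g.add_edge(1, 2, 1)
#     g.get_shortest_path(0, 2)  # -> 1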
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 | 0 |
from collections.abc import Iterable
from typing import Any
class Node:
    """A single node of the binary search tree."""

    def __init__(self, value=None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """A binary search tree supporting insert, search, remove and traversals."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None
    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)
    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType attribute errors
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
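# Module-level postorder traversal; returns the visited Node objects rather than their values.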
def postorder(curr_node) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 71 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and lower-casing normalization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
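    # Typical usage (a sketch, with a hypothetical corpus path):
    #     tok = SentencePieceUnigramTokenizer()
    #     tok.train("corpus.txt", vocab_size=8000)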
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json)) | 190 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    """Map a TF Pegasus weight name onto the equivalent PyTorch state_dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
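# Example: rename_state_dict_key("encoder/ffn.dense_1.kernel") -> "encoder.fc2.weight"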
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 83 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
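    # get_config below builds a deliberately tiny ALBERT (hidden_size=36, 6 layers) so these tests stay fast on CPU.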
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
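    # AlbertForPreTraining expects both MLM labels and a sentence-order label, hence the two extra tensors above.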
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 83 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
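    # _LazyModule defers the torch-backed imports until a module attribute is first accessed.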
| 185 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of FlaxLogitsProcessor/FlaxLogitsWarper objects, applied to the scores in order."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """FlaxLogitsWarper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
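# Example: FlaxTemperatureLogitsWarper(2.0) halves every logit, flattening the sampling
# distribution; temperatures below 1.0 sharpen it instead.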
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """FlaxLogitsWarper that performs top-p (nucleus) filtering."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """FlaxLogitsWarper that performs top-k filtering, keeping only the k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
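# Note: the top-k warper scatters the k best logits back into a flat array pre-filled with
# `filter_value`; this keeps the update shape-static, which is friendlier to XLA compilation.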
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor that enforces the specified token as the last one when max_length is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor that forces specific token ids at specific indices of the generation."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """FlaxLogitsProcessor that modifies the logits for the generation of timestamps in Whisper."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 185 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add to config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
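# Usage sketch (editor's addition): `update_from_string` parses a comma-separated
# "key=value" string and casts each value to the type of the existing attribute,
# which is exactly what `test_config_from_string` above exercises.
_example_config = GPT2Config()
_example_config.update_from_string("n_embd=10,resid_pdrop=0.2")
assert _example_config.n_embd == 10
assert _example_config.resid_pdrop == 0.2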
| 192 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Load an ONNX inference session, falling back to the CPU provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False, cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
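# Usage sketch (editor's addition): how the wrapper above is typically loaded.
# The repo id below is hypothetical; any Hub repo or local directory containing
# an ONNX file works the same way.
#
#   model = OnnxRuntimeModel.from_pretrained(
#       "some-user/some-onnx-model",  # hypothetical repo id
#       file_name="model.onnx",
#       provider="CPUExecutionProvider",
#   )
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))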
| 192 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(self, feature_size=1, sampling_rate=24_000, padding_value=0.0, chunk_length_s=None, overlap=None, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio, padding=None, truncation=False, max_length=None, return_tensors=None, sampling_rate=None):
        """Featurize and pad (or truncate) one or several audio sequence(s)."""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
# normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding,
            )
        if padding:
            padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
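# Usage sketch (editor's addition): padding a small batch of mono clips with the
# extractor above. The waveforms are dummies; 24 kHz matches the default rate.
_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
_clips = [np.zeros(24_000, dtype=np.float32), np.zeros(36_000, dtype=np.float32)]
_features = _extractor(_clips, sampling_rate=24_000, padding=True, return_tensors="np")
# both clips are padded to the longest length; a padding mask marks the real samples
assert _features["input_values"].shape[0] == 2
assert "padding_mask" in _features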
| 321 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 1_2,
'430M': 2_4,
'1B5': 2_4,
'3B': 3_2,
'7B': 3_2,
'14B': 4_0,
}
HIDDEN_SIZE_MAPPING = {
'169M': 7_6_8,
'430M': 1_0_2_4,
'1B5': 2_0_4_8,
'3B': 2_5_6_0,
'7B': 4_0_9_6,
'14B': 5_1_2_0,
}
def convert_state_dict(state_dict):
    """Rename the original RWKV checkpoint keys to the Transformers naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
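# Toy demonstration (editor's addition): the key renaming performed by
# `convert_state_dict` on a dummy state dict. The tensor values are placeholders.
_demo_state_dict = convert_state_dict(
    {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1)}
)
assert "rwkv.embeddings.weight" in _demo_state_dict
assert "rwkv.blocks.0.attention.time_mix_key" in _demo_state_dict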
| 155 | 0 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = (
    "\nclass {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n\n"
    "    @classmethod\n"
    "    def from_config(cls, *args, **kwargs):\n"
    "        requires_backends(cls, {1})\n\n"
    "    @classmethod\n"
    "    def from_pretrained(cls, *args, **kwargs):\n"
    "        requires_backends(cls, {1})\n"
)

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
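# Worked example (editor's addition): `find_backend` extracts the backend name
# from a guarded-import line and `create_dummy_object` renders the matching stub.
assert find_backend("    if not is_torch_available():") == "torch"
assert create_dummy_object("UNet2DModel", '["torch"]').startswith(
    "\nclass UNet2DModel(metaclass=DummyObject):"
)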
| 358 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 209 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Check whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generate primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'''{solution() = }''')
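# Quick check (editor's addition): the primes below 10 are 2, 3, 5 and 7,
# so `solution(10)` must be 17.
assert solution(10) == 17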
| 120 |
lowerCAmelCase : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 253 | 0 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance from the node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """Search for the path; if no path is found, only the starting position is returned."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid neighbours of `parent` (inside the grid and on free cells)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
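# Note (editor's addition): unlike A*, this greedy best-first search orders the
# open list purely by the heuristic -- `f_cost` is just the Manhattan distance
# and `g_cost` is tracked but never added in. On a fresh grid one would get, e.g.:
#
#   path = GreedyBestFirst((0, 0), (2, 2)).search()
#   # -> a list of (row, col) tuples from (0, 0) to (2, 2), expanded heuristic-first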
| 364 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
"""simple docstring"""
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 89 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # `text_config_dict` and `vision_config_dict` are kept only for backward compatibility.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
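# Usage sketch (editor's addition): composing a full config from the two
# sub-configurations defined above, keeping every hyperparameter at its default.
_text_config = BridgeTowerTextConfig()
_vision_config = BridgeTowerVisionConfig()
_full_config = BridgeTowerConfig.from_text_vision_configs(_text_config, _vision_config)
assert _full_config.text_config.vocab_size == _text_config.vocab_size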
| 253 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi Level Feedback Queue scheduler."""

    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
A__: Any = Process('''P1''', 0, 53)
A__: Tuple = Process('''P2''', 0, 17)
A__: Tuple = Process('''P3''', 0, 68)
A__: Tuple = Process('''P4''', 0, 24)
A__: Any = 3
A__: str = [17, 25]
A__: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A__: str = Process('''P1''', 0, 53)
A__: Union[str, Any] = Process('''P2''', 0, 17)
A__: Optional[Any] = Process('''P3''', 0, 68)
A__: str = Process('''P4''', 0, 24)
A__: Any = 3
A__: Optional[Any] = [17, 25]
A__: Any = deque([Pa, Pa, Pa, Pa])
A__: Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A__: str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 149 | 0 |
'''simple docstring'''
import sys
def matrix_chain_order(array) -> tuple[list[list[int]], list[list[int]]]:
    """Compute the optimal matrix-chain multiplication order via dynamic programming."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j) -> None:
    """Recursively print the optimal parenthesization."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main() -> None:
    """Run the standard example chain."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrices created from the above array:
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
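# Quick check (illustrative; these are the textbook values for this chain and
# are not printed anywhere in the file above): matrix_chain_order returns
# matrix[1][6] == 15125 scalar multiplications, and print_optimal_solution
# emits the parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).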
| 249 |
'''simple docstring'''
def topological_sort(graph) -> None:
    """Kahn's algorithm: print a topological ordering, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
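# Expected output for the sample graph above: [0, 1, 2, 3, 4, 5]
# (0 is the only zero-indegree vertex; processing it frees 1 and 2, which
# free 3, which in turn frees 4 and 5).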
| 249 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so that Image.open references resolve when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
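# Minimal usage sketch (illustrative; it mirrors what test_large_model_pt
# exercises above rather than introducing anything new):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]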
| 61 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
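# Sanity check (from the problem's worked example, not computed in this file):
# solution(3, 7, 8) == 2, because 2/5 is the reduced proper fraction
# immediately to the left of 3/7 once denominators are capped at 8.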
| 61 | 1 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ['text']
    outputs = ['audio']

    def setup(self):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="""pt""", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""")

            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""", split="""validation""")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["""xvector"""]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
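# Hypothetical usage sketch (not part of the original file; PipelineTool's
# __call__ chains encode -> forward -> decode):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool("Hello, how are you?")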
| 359 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
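    # Worked example of the shape arithmetic above, using this tester's
    # defaults: image_size=32 and patch_size=2 give 16 * 16 = 256 patches;
    # with len(depths) == 3 the model downsamples twice, so the sequence
    # shrinks to 256 // 4**2 = 16 tokens of width embed_dim * 2**2 = 64.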
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="""Swinv2 does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, """num_hidden_states_types"""):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""").to(
            torch_device)
        image_processor = self.default_image_processor

        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDebertaVaModel,
            'fill-mask': TFDebertaVaForMaskedLM,
            'question-answering': TFDebertaVaForQuestionAnswering,
            'text-classification': TFDebertaVaForSequenceClassification,
            'token-classification': TFDebertaVaForTokenClassification,
            'zero-shot': TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="""Model not available yet""")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
tf.debugging.assert_near(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1e-4 )
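# Standalone sketch of the no-head check above (illustrative; it repeats the
# test in script form rather than adding behavior):
#
#   model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
#   output = model(input_ids, attention_mask=attention_mask)[0]
#   tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)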
| 165 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be trained.'})
    save_dir: Optional[str] = field(
        default='./', metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'})
    dataset_name_train: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path of training dataset.'})
    dataset_name_valid: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'})
    train_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for training.'})
    valid_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for evaluation.'})
    weight_decay: Optional[float] = field(default=0.1, metadata={'help': 'Value of weight decay.'})
    shuffle_buffer: Optional[int] = field(
        default=1_0_0_0_0, metadata={'help': 'Size of buffer used to shuffle streaming dataset.'})
    learning_rate: Optional[float] = field(default=2E-4, metadata={'help': 'Learning rate for training.'})
    lr_scheduler_type: Optional[str] = field(default='cosine', metadata={'help': 'Learning rate scheduler type.'})
    num_warmup_steps: Optional[int] = field(
        default=7_5_0, metadata={'help': 'Number of warmup steps in the learning rate schedule.'})
    gradient_accumulation_steps: Optional[int] = field(
        default=1_6, metadata={'help': 'Number of gradient accumulation steps.'})
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'})
    max_train_steps: Optional[int] = field(default=5_0_0_0_0, metadata={'help': 'Maximum number of training steps.'})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'})
    seq_length: Optional[int] = field(default=1_0_2_4, metadata={'help': 'Sequence lengths used for training.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Training seed.'})
    save_checkpoint_steps: Optional[int] = field(
        default=1_0_2_4, metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'}, )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={'help': 'States path if the training should continue from a checkpoint folder.'})
    tokenized: Optional[bool] = field(default=False, metadata={'help': 'If True the data is pretokenized.'})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'})
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'})
    batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size used for evaluation.'})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'})
    seq_length: Optional[int] = field(default=1_0_2_4, metadata={'help': 'Length of sequences to be evaluated.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'})
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for code evaluation.'})
    num_tasks: Optional[int] = field(
        default=None, metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'}, )
    do_sample: Optional[bool] = field(
        default=True, metadata={'help': 'Sample from the language model\'s output distribution.'})
    temperature: Optional[float] = field(default=0.2, metadata={'help': 'Sampling temperature used for generation.'})
    max_new_tokens: Optional[int] = field(default=2_5_6, metadata={'help': 'Maximum number of newly generated tokens.'})
    top_k: Optional[int] = field(default=0, metadata={'help': 'Top-k parameter used for generation.'})
    top_p: Optional[float] = field(default=0.9_5, metadata={'help': 'Top-p parameter used for nucleus sampling.'})
    batch_size: Optional[int] = field(default=1_0, metadata={'help': 'Number of generations to run in parallel.'})
    n_samples: Optional[int] = field(
        default=2_0_0, metadata={'help': 'Number of completions to generate for each sample.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})
    output_file: Optional[str] = field(
        default='eval_results.json', metadata={'help': 'File where the evaluation results are saved.'})
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default='0', metadata={'help': 'Allow `code_eval` to execute Python code on machine'})
    device_int: Optional[int] = field(
        default=-1, metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        }, )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None, metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        }, )
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot', metadata={'help': 'Folder or name of dataset to process.'})
    output_dir: Optional[str] = field(
        default='codeparrot-clean', metadata={'help': 'Folder to save processed dataset.'})
    samples_per_file: Optional[int] = field(
        default=1_0_0_0_0_0, metadata={'help': 'Number of files to save per JSON output file.'})
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    line_max: Optional[float] = field(
        default=1_0_0_0, metadata={'help': 'Maximum line length in file, otherwise file is filtered.'})
    line_mean: Optional[float] = field(
        default=1_0_0, metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'})
    alpha_frac: Optional[float] = field(
        default=0.2_5, metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'})
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'})
    filter_proba: Optional[float] = field(
        default=0.7, metadata={'help': 'Probability for filtering config, test and uncommon files.'})
    tokenizer: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'}, )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={'help': 'If True, near-duplicate samples are removed.'})
    jaccard_threshold: Optional[float] = field(
        default=0.8_5, metadata={'help': 'Jaccard threshold for near-duplicate samples.'})
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default='gpt2', metadata={'help': 'Base tokenizer to build new tokenizer from.'})
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot-train', metadata={'help': 'Dataset to train tokenizer on.'})
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    n_examples: Optional[int] = field(default=2_0_0_0_0_0, metadata={'help': 'Number of examples to train tokenizer on.'})
    vocab_size: Optional[int] = field(
        default=3_2_7_6_8, metadata={'help': 'Vocabulary size of the new tokenizer.'})
    tokenizer_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of new tokenizer.'})
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved tokenizer to the hub.'})
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'})
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path to the dataset to pretokenize.'})
    tokenized_data_repo: Optional[str] = field(
        default='tokenized-codeparrot-train', metadata={'help': 'Repo name of the pretokenized data.'})
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for code evaluation.'})
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default='gpt2-large', metadata={'help': 'Configuration to use for model initialization.'})
    tokenizer_name: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Tokenizer attached to model.'})
    model_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of the created model.'})
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved model to the hub.'})
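# Typical consumption pattern for these dataclasses (illustrative; assumes
# transformers' HfArgumentParser, which is not imported in this file):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   training_args = parser.parse_args_into_dataclasses()[0]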
| 165 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced sum x + y + z as a (numerator, denominator) tuple."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Sum the unique fractions z with x**n + y**n == z**n for n in
    {1, 2, -1, -2}, where x, y, z are proper fractions with denominators
    bounded by `order`; return numerator + denominator of the total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
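# For example, with x = 1/3 and y = 1/4 the n=1 branch yields
# z_num = 1*4 + 3*1 = 7 and z_den = 12, so z = 7/12 is recorded once
# order >= 12 (it satisfies 0 < 7 < 12 <= order).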
if __name__ == "__main__":
    print(f"{solution() = }") | 307 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
        """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSpeech2TextForConditionalGeneration""",
        """TFSpeech2TextModel""",
        """TFSpeech2TextPreTrainedModel""",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
        """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Speech2TextForConditionalGeneration""",
        """Speech2TextModel""",
        """Speech2TextPreTrainedModel""",
    ]
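# Note (illustrative, not in the original file): with this lazy pattern,
# `from transformers.models.speech_to_text import Speech2TextProcessor`
# defers the heavy submodule import until the attribute is first accessed.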
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 307 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['''past_key_values''']

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class _snake_case ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (TrOCRForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowerCamelCase )
a :Tuple = ConfigTester(self , config_class=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def SCREAMING_SNAKE_CASE__ ( self ):
pass
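# Hedged sketch of the caching contract exercised above (illustrative, not
# part of the test suite): with use_cache=True the decoder returns past key
# values, and the next step only needs the newly generated token, e.g.
#   out = model(input_ids, use_cache=True)
#   step = model(next_token, past_key_values=out["past_key_values"])
# Both paths must agree on the shared positions, which is what the
# torch.allclose check in create_and_check_decoder_model_past asserts.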
| 94 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _A ( A__=None ):
"""simple docstring"""
if subparsers is not None:
__lowercase = subparsers.add_parser('''tpu-config''' , description=_description )
else:
__lowercase = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
__lowercase = parser.add_argument_group(
'''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=A__ , default=A__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=A__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=A__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
__lowercase = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=A__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def _A ( A__ ):
"""simple docstring"""
__lowercase = None
# Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
__lowercase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
__lowercase = defaults.command_file
if not args.command and defaults.commands is not None:
__lowercase = defaults.commands
if not args.tpu_name:
__lowercase = defaults.tpu_name
if not args.tpu_zone:
__lowercase = defaults.tpu_zone
if args.accelerate_version == "dev":
__lowercase = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
__lowercase = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
__lowercase = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file , '''r''' ) as f:
__lowercase = [f.read().splitlines()]
# To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
__lowercase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
__lowercase = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
__lowercase = '''; '''.join(A__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
__lowercase = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(A__ )}" )
return
subprocess.run(A__ )
print('''Successfully setup pod.''' )
def _A ( ):
"""simple docstring"""
__lowercase = tpu_command_parser()
__lowercase = parser.parse_args()
tpu_command_launcher(A__ )
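# Hedged illustration of the command assembly above (the commands are
# hypothetical): per-pod commands are joined with '; ' and shipped to
# gcloud as a single --command string.
_demo_cmds = ["cd /usr/share", "pip install accelerate -U", "accelerate launch train.py"]
assert "; ".join(_demo_cmds) == "cd /usr/share; pip install accelerate -U; accelerate launch train.py"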
| 104 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 ) # basically /= 8 without remainder if any
    # int() strips the trailing '.0' that the float arithmetic leaves on `octal`.
    return f'0o{int(octal )}'
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
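    # Hedged cross-check (added; assumes non-negative integer input):
    # Python's built-in oct() should agree with decimal_to_octal above.
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value)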
| 351 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a = [F'<extra_id_{i}>' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
super().__init__(
eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
a = extra_ids
a = 2**8 # utf is 8 bits
# define special tokens dict
a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a = len(self.special_tokens_encoder )
a = len(__magic_name__ )
for i, token in enumerate(__magic_name__ ):
a = self.vocab_size + i - n
a = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ):
'''simple docstring'''
if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = self._add_eos_if_not_present(__magic_name__ )
if token_ids_a is None:
return token_ids_a
else:
a = self._add_eos_if_not_present(__magic_name__ )
return token_ids_a + token_ids_a
def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ):
'''simple docstring'''
a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )]
return tokens
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if token in self.special_tokens_encoder:
a = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a = self.added_tokens_encoder[token]
elif len(__magic_name__ ) != 1:
a = self.unk_token_id
else:
a = ord(__magic_name__ ) + self._num_special_tokens
return token_id
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
if index in self.special_tokens_decoder:
a = self.special_tokens_decoder[index]
else:
a = chr(index - self._num_special_tokens )
return token
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
a = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
a = token.encode("""utf-8""" )
else:
a = bytes([ord(__magic_name__ )] )
bstring += tok_string
a = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
return ()
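# Hedged round-trip sketch of the byte-level scheme above: token ids are
# raw UTF-8 bytes shifted by the number of leading special tokens (pad=0,
# eos=1, unk=2, so the offset is 3 in the standard ByT5 layout).
_text = "hi"
_ids = [b + 3 for b in _text.encode("utf-8")]  # [107, 108]
assert bytes(i - 3 for i in _ids).decode("utf-8") == _text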
| 347 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Any = """Hello, World!"""
UpperCAmelCase : int = """en_XX"""
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ):
"""simple docstring"""
a__ : Any =Path("data_bin" )
a__ : Optional[Any] =FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe="sentencepiece" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
a__ : int =xmod.model.encoder.sentence_encoder
a__ : Any =XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
a__ : Union[str, Any] =xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , SCREAMING_SNAKE_CASE )
a__ : str =XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
a__ : Tuple =xmod_sent_encoder.embed_tokens.weight
a__ : int =xmod_sent_encoder.embed_positions.weight
a__ : List[str] =torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
a__ : Tuple =xmod_sent_encoder.layernorm_embedding.weight
a__ : Any =xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
a__ : List[Any] =model.roberta.encoder.layer[i]
a__ : str =xmod_sent_encoder.layers[i]
# self attention
a__ : Union[str, Any] =layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
a__ : Any =xmod_layer.self_attn.q_proj.weight
a__ : Optional[Any] =xmod_layer.self_attn.q_proj.bias
a__ : Optional[int] =xmod_layer.self_attn.k_proj.weight
a__ : Optional[int] =xmod_layer.self_attn.k_proj.bias
a__ : Any =xmod_layer.self_attn.v_proj.weight
a__ : List[str] =xmod_layer.self_attn.v_proj.bias
# self-attention output
a__ : Union[str, Any] =layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
a__ : Any =xmod_layer.self_attn.out_proj.weight
a__ : str =xmod_layer.self_attn.out_proj.bias
a__ : Dict =xmod_layer.self_attn_layer_norm.weight
a__ : Any =xmod_layer.self_attn_layer_norm.bias
# intermediate
a__ : List[Any] =layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
a__ : Any =xmod_layer.fca.weight
a__ : str =xmod_layer.fca.bias
# output
a__ : Union[str, Any] =layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
a__ : int =xmod_layer.fca.weight
a__ : str =xmod_layer.fca.bias
a__ : str =xmod_layer.final_layer_norm.weight
a__ : Optional[Any] =xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
a__ : Union[str, Any] =xmod_layer.adapter_layer_norm.weight
a__ : List[str] =xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
a__ : int =bert_output.adapter_modules[lang_code]
a__ : List[Any] =xmod_layer.adapter_modules[lang_code]
a__ : List[str] =from_adapter.fca.weight
a__ : List[Any] =from_adapter.fca.bias
a__ : Optional[int] =from_adapter.fca.weight
a__ : List[Any] =from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
a__ : Any =xmod_sent_encoder.layer_norm.weight
a__ : Tuple =xmod_sent_encoder.layer_norm.bias
if classification_head:
a__ : int =xmod.model.classification_heads["mnli"].dense.weight
a__ : Union[str, Any] =xmod.model.classification_heads["mnli"].dense.bias
a__ : List[str] =xmod.model.classification_heads["mnli"].out_proj.weight
a__ : Any =xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
a__ : Optional[Any] =xmod.model.encoder.lm_head.dense.weight
a__ : Dict =xmod.model.encoder.lm_head.dense.bias
a__ : List[str] =xmod.model.encoder.lm_head.layer_norm.weight
a__ : Any =xmod.model.encoder.lm_head.layer_norm.bias
a__ : Dict =xmod.model.encoder.lm_head.weight
a__ : Optional[int] =xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
a__ : Tuple =xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
a__ : List[Any] =model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
a__ : Optional[int] =xmod.model.classification_heads["mnli"](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
a__ : Any =xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
a__ : Any =torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
a__ : List[str] =torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCAmelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
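# Hedged invocation sketch for the converter above; the script name and
# paths are hypothetical placeholders:
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path /path/to/model.pt \
#     --pytorch_dump_folder_path ./xmod-base \
#     --classification_head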
| 95 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
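# Hedged usage note for the lazy module above: the heavy torch/tf/flax
# imports only run when a name is first accessed, so a line like
#   from transformers import OPTForCausalLM
# is what actually triggers loading the PyTorch OPT implementation.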
| 348 | 0 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_a : List[Any]= None
_a : str= "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_a : Dict= [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCamelCase :
"""simple docstring"""
UpperCAmelCase : bool = True
UpperCAmelCase : Optional[str] = None
# Automatically constructed
UpperCAmelCase : ClassVar[str] = "PIL.Image.Image"
UpperCAmelCase : ClassVar[Any] = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
UpperCAmelCase : str = field(default="""Image""" , init=__snake_case , repr=__snake_case )
def __call__(self : Tuple) -> Union[str, Any]:
return self.pa_type
def _lowercase (self : Optional[Any] , _A : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
if isinstance(lowerCamelCase_ , lowerCamelCase_):
__snake_case : Optional[int] = np.array(lowerCamelCase_)
if isinstance(lowerCamelCase_ , lowerCamelCase_):
return {"path": value, "bytes": None}
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
return {"path": None, "bytes": value}
elif isinstance(lowerCamelCase_ , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCamelCase_)
elif isinstance(lowerCamelCase_ , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCamelCase_)
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
f"An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.")
def _lowercase (self : List[str] , _A : dict , _A : Union[str, Any]=None) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.')
if token_per_repo_id is None:
__snake_case : Tuple = {}
        path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of \'path\' or \'bytes\' but both are None in {value}.")
else:
if is_local_path(lowerCamelCase_):
__snake_case : Any = PIL.Image.open(lowerCamelCase_)
else:
__snake_case : int = path.split('::')[-1]
try:
__snake_case : Optional[int] = string_to_dict(lowerCamelCase_ , config.HUB_DATASETS_URL)["""repo_id"""]
__snake_case : str = token_per_repo_id.get(lowerCamelCase_)
except ValueError:
__snake_case : Optional[int] = None
with xopen(lowerCamelCase_ , 'rb' , use_auth_token=lowerCamelCase_) as f:
__snake_case : Optional[int] = BytesIO(f.read())
__snake_case : List[Any] = PIL.Image.open(bytes_)
else:
__snake_case : Dict = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def _lowercase (self : List[Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary'),
"path": Value('string'),
}
)
def _lowercase (self : Tuple , _A : Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
__snake_case : Tuple = pa.array([None] * len(lowerCamelCase_) , type=pa.binary())
__snake_case : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
__snake_case : int = pa.array([None] * len(lowerCamelCase_) , type=pa.string())
__snake_case : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('bytes') >= 0:
__snake_case : str = storage.field('bytes')
else:
__snake_case : Dict = pa.array([None] * len(lowerCamelCase_) , type=pa.binary())
if storage.type.get_field_index('path') >= 0:
__snake_case : Optional[int] = storage.field('path')
else:
__snake_case : Optional[int] = pa.array([None] * len(lowerCamelCase_) , type=pa.string())
__snake_case : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
__snake_case : str = pa.array(
[encode_np_array(np.array(lowerCamelCase_))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Tuple = pa.array([None] * len(lowerCamelCase_) , type=pa.string())
__snake_case : int = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(lowerCamelCase_ , self.pa_type)
def _lowercase (self : Union[str, Any] , _A : pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_A : Union[str, Any]):
with xopen(lowerCamelCase_ , 'rb') as f:
__snake_case : str = f.read()
return bytes_
__snake_case : str = pa.array(
[
(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : List[str] = pa.array(
[os.path.basename(lowerCamelCase_) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
__snake_case : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(lowerCamelCase_ , self.pa_type)
def __UpperCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__snake_case : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __UpperCAmelCase ( UpperCAmelCase_ : "PIL.Image.Image" ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : Optional[int] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(_a , format=_a )
return buffer.getvalue()
def __UpperCAmelCase ( UpperCAmelCase_ : "PIL.Image.Image" ) -> Union[str, Any]:
'''simple docstring'''
if hasattr(_a , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_a )}
def __UpperCAmelCase ( UpperCAmelCase_ : np.ndarray ) -> Optional[Any]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
__snake_case : Optional[Any] = array.dtype
__snake_case : Optional[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__snake_case : int = dtype.kind
__snake_case : Tuple = dtype.itemsize
__snake_case : List[str] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__snake_case : Tuple = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__snake_case : List[Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__snake_case : List[Any] = dtype_byteorder + dtype_kind + str(_a )
__snake_case : Union[str, Any] = np.dtype(_a )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
__snake_case : int = PIL.Image.fromarray(array.astype(_a ) )
return {"path": None, "bytes": image_to_bytes(_a )}
def __UpperCAmelCase ( UpperCAmelCase_ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
__snake_case : List[str] = first_non_null_value(_a )
if isinstance(_a , _a ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_a , np.ndarray ):
__snake_case : Dict = no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
elif isinstance(_a , PIL.Image.Image ):
__snake_case : Optional[int] = no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
else:
return objs
else:
return objs
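# Hedged round-trip sketch for the feature above, using the public
# datasets.Image API (requires Pillow; run as a standalone snippet, since
# importing datasets from inside this module would be circular):
#   import numpy as np, PIL.Image
#   from datasets import Image
#   img = PIL.Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8))
#   enc = Image().encode_example(img)        # {"path": None, "bytes": b"..."}
#   assert Image().decode_example(enc).size == (4, 4)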
| 365 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Union[str, Any]) -> Optional[int]:
__snake_case : Optional[Any] = 0
def _lowercase (self : Tuple) -> int:
__snake_case : Optional[Any] = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(_A , _A)
def _lowercase (self : str) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Any) -> Optional[int]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = Path(_A) / 'preprocessor_config.json'
__snake_case : List[Any] = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : str = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__snake_case : List[Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A).to_dict()
config_dict.pop('image_processor_type')
__snake_case : Optional[int] = CLIPImageProcessor(**_A)
# save in new folder
model_config.save_pretrained(_A)
config.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
# make sure private variable is not incorrectly saved
__snake_case : int = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_A , _A)
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = Path(_A) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Optional[int]) -> Dict:
with self.assertRaisesRegex(
_A , 'clip-base is not a local folder and is not a valid model identifier'):
__snake_case : Tuple = AutoImageProcessor.from_pretrained('clip-base')
def _lowercase (self : str) -> int:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : str = AutoImageProcessor.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : List[Any]) -> str:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Optional[int]) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A):
__snake_case : Any = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A):
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def _lowercase (self : int) -> Optional[int]:
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
AutoImageProcessor.register(_A , _A)
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Tuple = Path(_A) / 'preprocessor_config.json'
__snake_case : Dict = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = CustomImageProcessor.from_pretrained(_A)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : List[Any]) -> Tuple:
class UpperCamelCase ( lowercase ):
UpperCAmelCase : str = True
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# If remote code is not set, the default is to use local
__snake_case : Tuple = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(_A , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
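# Hedged usage sketch of the auto class exercised above (run standalone;
# fetching the checkpoint needs network access):
#   import numpy as np
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(images=np.zeros((32, 32, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with CLIP defaults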
| 95 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
def extract(*lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : str ):
class SCREAMING_SNAKE_CASE_ :
def __init__( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = torch.ones([0] )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A painting of a squirrel eating a burger"""
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A painting of a squirrel eating a burger"""
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCamelCase_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A painting of a squirrel eating a burger"""
UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCamelCase_ )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
UpperCamelCase = 40_0366_0346
UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCamelCase_ )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
UpperCamelCase = 27_3497_1755
UpperCamelCase = 7
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
UpperCamelCase = 10_4435_5234
UpperCamelCase = 12
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
UpperCamelCase = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
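# Hedged end-to-end sketch of the safe pipeline exercised above (model
# download and a CUDA device assumed; sld_guidance_scale=0 disables safe
# latent diffusion, larger values strengthen it):
#   from diffusers import StableDiffusionPipelineSafe
#   pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   image = pipe("a painting of a squirrel", num_inference_steps=25, sld_guidance_scale=2000).images[0]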
| 343 |
def topological_sort( graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("""Cycle exists""" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
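# Hedged extra check (not in the original): a cyclic graph such as
# 0 -> 1 -> 0 hits the "Cycle exists" branch above, because cnt can never
# reach len(graph) while every vertex keeps a nonzero indegree.
topological_sort({0: [1], 1: [0]})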
| 345 | 0 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x: torch.Tensor) -> torch.Tensor:
    '''simple docstring'''
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
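# Hedged sanity check (added): uniform logits over k classes give the
# maximum softmax entropy log(k), e.g. log(4) ~= 1.3863 for k = 4.
assert torch.allclose(entropy(torch.zeros(1, 4)), torch.log(torch.tensor([4.0])))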
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Accept either a single threshold for every layer or one per layer.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self : List[str] , UpperCAmelCase : Dict ):
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : Dict = config
lowerCAmelCase_ : Dict = BertEmbeddings(UpperCAmelCase )
lowerCAmelCase_ : List[str] = DeeBertEncoder(UpperCAmelCase )
lowerCAmelCase_ : Tuple = BertPooler(UpperCAmelCase )
self.init_weights()
def A ( self : str ):
self.encoder.init_highway_pooler(self.pooler )
def A ( self : List[Any] ):
return self.embeddings.word_embeddings
def A ( self : List[Any] , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Optional[Any] = value
def A ( self : int , UpperCAmelCase : str ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase )
@add_start_docstrings_to_model_forward(UpperCAmelCase )
def A ( self : Any , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str=None , UpperCAmelCase : Dict=None , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
lowerCAmelCase_ : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase_ : Any = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
lowerCAmelCase_ : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase_ : List[Any] = torch.ones(UpperCAmelCase , device=UpperCAmelCase )
if encoder_attention_mask is None:
lowerCAmelCase_ : Any = torch.ones(UpperCAmelCase , device=UpperCAmelCase )
if token_type_ids is None:
lowerCAmelCase_ : List[str] = torch.zeros(UpperCAmelCase , dtype=torch.long , device=UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase_ : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCAmelCase_ : Optional[int] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCAmelCase_ : Tuple = encoder_attention_mask[:, None, None, :]
lowerCAmelCase_ : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        lowerCAmelCase_ : str = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase_ : Optional[Any] = self.get_head_mask(UpperCAmelCase , self.config.num_hidden_layers )
lowerCAmelCase_ : List[str] = self.embeddings(
input_ids=UpperCAmelCase , position_ids=UpperCAmelCase , token_type_ids=UpperCAmelCase , inputs_embeds=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = self.encoder(
UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = encoder_outputs[0]
lowerCAmelCase_ : Any = self.pooler(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) head: pools one intermediate BertLayer output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : int , UpperCAmelCase : List[str] ):
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : Tuple = config.num_labels
lowerCAmelCase_ : int = config.num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = DeeBertModel(UpperCAmelCase )
lowerCAmelCase_ : Any = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase_ : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase )
def A ( self : Optional[Any] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Tuple=-1 , UpperCAmelCase : str=False , ):
lowerCAmelCase_ : List[str] = self.num_layers
try:
lowerCAmelCase_ : Any = self.bert(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , position_ids=UpperCAmelCase , head_mask=UpperCAmelCase , inputs_embeds=UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase_ : Optional[Any] = outputs[1]
lowerCAmelCase_ : List[Any] = self.dropout(UpperCAmelCase )
lowerCAmelCase_ : Dict = self.classifier(UpperCAmelCase )
lowerCAmelCase_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase_ : Optional[int] = e.message
lowerCAmelCase_ : Optional[int] = e.exit_layer
lowerCAmelCase_ : Any = outputs[0]
if not self.training:
lowerCAmelCase_ : List[Any] = entropy(UpperCAmelCase )
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase_ : List[str] = MSELoss()
lowerCAmelCase_ : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase_ : str = CrossEntropyLoss()
lowerCAmelCase_ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase_ : List[str] = []
for highway_exit in outputs[-1]:
lowerCAmelCase_ : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase_ : Any = MSELoss()
lowerCAmelCase_ : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase_ : Optional[Any] = CrossEntropyLoss()
lowerCAmelCase_ : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase )
if train_highway:
lowerCAmelCase_ : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase_ : List[str] = (loss,) + outputs
if not self.training:
lowerCAmelCase_ : int = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
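

def _early_exit_sketch(model, inputs, threshold=0.5):
    # Added usage sketch; `model` (a trained DeeBertForSequenceClassification)
    # and `inputs` (its tokenizer's output dict) are assumptions here, and the
    # attribute names follow the upstream DeeBERT research example. During
    # inference the encoder raises HighwayException as soon as a highway
    # classifier's prediction entropy drops below the per-layer threshold, and
    # forward() catches it, returning the early logits plus the exit layer.
    model.bert.encoder.set_early_exit_entropy(threshold)
    model.eval()
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs[0]
    exit_layer = outputs[-1]  # appended by forward() at inference time
    return logits, exit_layer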
| 28 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return the first `precision` digits of pi via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision  # Decimal arithmetic at the requested precision
    num_iterations = ceil(precision / 14)  # each series term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
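    # Added verification sketch: each series term contributes roughly 14
    # digits, so even a short run must reproduce a known prefix of pi.
    assert pi(16).startswith("3.1415926535897")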
| 28 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
snake_case_ = dict(zip(a__ , range(len(a__ ) ) ) )
snake_case_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
snake_case_ = {"unk_token": "<unk>"}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
snake_case_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
snake_case_ = os.path.join(self.tmpdirname , a__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a__ , a__ )
def lowerCAmelCase__ ( self , **a__ ) -> Dict:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a__ )
def lowerCAmelCase__ ( self , **a__ ) -> List[str]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def lowerCAmelCase__ ( self , **a__ ) -> Tuple:
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **a__ )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
        snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]  # random RGB-like test image
snake_case_ = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=a__ )
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a__ )
self.assertIsInstance(processor_fast.tokenizer , a__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a__ )
self.assertIsInstance(processor_fast.image_processor , a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case_ = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
snake_case_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(a__ , return_tensors="np" )
snake_case_ = processor(images=a__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
snake_case_ = "lower newer"
snake_case_ = processor(text=a__ )
snake_case_ = tokenizer(a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
snake_case_ = "lower newer"
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(a__ )
snake_case_ = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=a__ , image_processor=a__ )
snake_case_ = "lower newer"
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
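

def _clip_processor_sketch(texts, images):
    # Added usage sketch outside the test harness: a single processor call
    # yields input_ids, attention_mask and pixel_values together. The
    # checkpoint name is an assumption; any CLIP checkpoint with a fast
    # tokenizer works.
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    return processor(text=texts, images=images, return_tensors="np")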
| 85 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """Sort-key helper: return the first element of a tuple."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of `message` ordered from most to least frequent."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Break frequency ties by reverse ETAOIN order, as the classic
        # frequency-analysis recipe does.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score how closely the letter frequencies of `message` match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
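

def _caesar_shift(message: str, key: int) -> str:
    # Helper for the added sketch below (new name, not in the original file):
    # shift each uppercase letter by `key` positions, leaving others untouched.
    return "".join(
        chr((ord(ch) - 65 + key) % 26 + 65) if ch in LETTERS else ch
        for ch in message.upper()
    )


if __name__ == "__main__":
    # Added usage sketch: English plaintext tends to outscore its Caesar
    # shifts, so english_freq_match_score can rank brute-force candidates.
    # The score is a heuristic; very short texts can still mis-rank.
    plain = "THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG"
    print(english_freq_match_score(plain))  # typically higher than the shifted text's score
    print(english_freq_match_score(_caesar_shift(plain, 13)))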
| 90 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case__ : int = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : int = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 314 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case__ : Optional[Any] = '''
import os
'''
snake_case__ : Tuple = '''
def foo():
import os
return False
'''
snake_case__ : Any = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
snake_case__ : Any = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : int = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
snake_case__ : Any = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
snake_case__ : List[str] = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
snake_case__ : int = '''
import os
try:
import bar
except:
raise ValueError()
'''
snake_case__ : List[Any] = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
snake_case__ : Optional[int] = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
snake_case__ : Any = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _snake_case )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[str] ):
lowerCAmelCase : Dict = os.path.join(_snake_case , '''test_file.py''' )
with open(_snake_case , '''w''' ) as _tmp_file:
_tmp_file.write(_snake_case )
lowerCAmelCase : Tuple = get_imports(_snake_case )
assert parsed_imports == ["os"]
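

TOP_LEVEL_MULTIPLE_IMPORTS = """
import os
import numpy
"""


@pytest.mark.parametrize("case", [TOP_LEVEL_MULTIPLE_IMPORTS])
def test_parse_multiple_unguarded_imports(tmp_path, case):
    # Added sketch: unguarded imports are all collected, so unlike the
    # try/except ImportError cases above this file parses to more than "os".
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert sorted(parsed_imports) == ["numpy", "os"]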
| 314 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Tuple = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class _snake_case ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 'data2vec-text'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
a :int = vocab_size
a :Dict = hidden_size
a :Optional[int] = num_hidden_layers
a :Dict = num_attention_heads
a :int = hidden_act
a :Optional[int] = intermediate_size
a :Optional[Any] = hidden_dropout_prob
a :int = attention_probs_dropout_prob
a :List[Any] = max_position_embeddings
a :Union[str, Any] = type_vocab_size
a :Tuple = initializer_range
a :Optional[Any] = layer_norm_eps
a :Optional[int] = position_embedding_type
a :Optional[int] = use_cache
a :Dict = classifier_dropout
class _snake_case ( snake_case_ ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.task == "multiple-choice":
a :Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a :List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 94 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
lowercase_ = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
lowercase_ = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
lowercase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase_ = [0] * args.vocab_size
for k, v in counter.items():
lowercase_ = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 58 | 0 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    """Rotation-based recursion: fix each element in turn and permute the rest."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """In-place backtracking via pairwise swaps."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
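    # Added cross-check sketch: both implementations above must agree with
    # itertools.permutations up to ordering.
    from itertools import permutations as it_permutations

    expected = sorted(list(p) for p in it_permutations([1, 2, 3]))
    assert sorted(permute([1, 2, 3])) == expected
    assert sorted(permute2([1, 2, 3])) == expected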
| 296 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 296 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 55 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """Disjoint-set node storing its data, parent pointer and rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) structure with path compression and union by rank."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal's minimum-spanning-tree algorithm."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
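

if __name__ == "__main__":
    # Added usage sketch: in a weighted triangle, Kruskal must drop the
    # heaviest edge.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}, no (1, 3) edge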
| 55 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __snake_case :
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
return None
class __snake_case :
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
return None
class __snake_case ( unittest.TestCase ):
_a : int= [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case ,"""tf""" ,12 ,**snake_case )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case ,"""pt""" ,12 ,**snake_case )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
from transformers import BertModel
lowercase : Optional[Any] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(snake_case ) )
vocab_file.flush()
lowercase : Optional[int] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase : List[str] = BertModel(BertConfig(vocab_size=len(snake_case ) ) )
model.save_pretrained(snake_case )
self._test_export(snake_case ,"""pt""" ,12 ,snake_case )
@require_tf
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase : Union[str, Any] = self._test_export(snake_case ,"""tf""" ,12 ,**snake_case )
lowercase : int = quantize(Path(snake_case ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase : Tuple = self._test_export(snake_case ,"""pt""" ,12 ,**snake_case )
lowercase : str = quantize(snake_case )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ,**snake_case ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase : Dict = Path(snake_case ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,**snake_case )
return path
except Exception as e:
self.fail(snake_case )
@require_torch
@require_tokenizers
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
from transformers import BertModel
lowercase : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(snake_case ,snake_case ,"""pt""" )
@require_tf
@require_tokenizers
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
from transformers import TFBertModel
lowercase : List[str] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase : Dict = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(snake_case ,snake_case ,"""tf""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = FeatureExtractionPipeline(snake_case ,snake_case )
lowercase : Optional[int] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowercase , lowercase , lowercase , lowercase : Optional[int] = infer_shapes(snake_case ,snake_case )
# Assert all variables are present
self.assertEqual(len(snake_case ) ,len(snake_case ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,snake_case )
self.assertSequenceEqual(variable_names[3:] ,snake_case )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowercase : Tuple = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowercase , lowercase : Optional[int] = ensure_valid_input(FuncContiguousArgs() ,snake_case ,snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case ) ,set(snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase , lowercase : int = ensure_valid_input(FuncNonContiguousArgs() ,snake_case ,snake_case )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(snake_case ) ,1 )
self.assertEqual(len(snake_case ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] ,"""input_ids""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
| 285 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = tempfile.mkdtemp()
lowercase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowercase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase : Any = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowercase : List[str] = os.path.join(self.tmpdirname ,snake_case )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        lowercase : Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]  # random RGB-like test image
lowercase : str = [Image.fromarray(np.moveaxis(snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.get_tokenizer()
lowercase : Dict = self.get_rust_tokenizer()
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowercase : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=snake_case )
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowercase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,snake_case )
self.assertIsInstance(processor_fast.tokenizer ,snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,snake_case )
self.assertIsInstance(processor_fast.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : int = self.get_tokenizer(cls_token="""(CLS)""" ,sep_token="""(SEP)""" )
lowercase : Dict = self.get_image_processor(do_normalize=snake_case )
lowercase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname ,cls_token="""(CLS)""" ,sep_token="""(SEP)""" ,do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : List[str] = self.get_tokenizer()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Optional[int] = self.prepare_image_inputs()
lowercase : Tuple = image_processor(snake_case ,return_tensors="""np""" )
lowercase : Optional[int] = processor(images=snake_case ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : Union[str, Any] = self.get_tokenizer()
lowercase : Any = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Any = """Alexandra,T-shirt的价格是15便士。"""
lowercase : int = processor(text=snake_case )
lowercase : Dict = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.get_image_processor()
lowercase : Optional[Any] = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Any = self.prepare_image_inputs()
lowercase : Optional[Any] = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : str = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : Any = processor.batch_decode(snake_case )
lowercase : str = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.get_image_processor()
lowercase : Dict = self.get_tokenizer()
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Optional[Any] = self.prepare_image_inputs()
lowercase : Dict = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 285 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
# Sieve of Eratosthenes over the odd candidates.
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return one product of primes per way of writing the number as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first number with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 108 | 1 |
"""simple docstring"""
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowerCamelCase_( _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ ) // 2
_lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE_ ) )
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) <= 2:
return True
if s[0] == s[len(SCREAMING_SNAKE_CASE_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return s == s[::-1]
def lowerCamelCase_( _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = F"""all({name}(key) is value for key, value in test_data.items())"""
_lowerCamelCase : Optional[Any] = F"""from __main__ import test_data, {name}"""
_lowerCamelCase : Any = 500000
_lowerCamelCase : Dict = timeit(stmt=SCREAMING_SNAKE_CASE_ , setup=SCREAMING_SNAKE_CASE_ , number=SCREAMING_SNAKE_CASE_ )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
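# `_LazyModule` defers the heavy, torch-backed submodule imports until an
# attribute is first accessed. A minimal sketch of the same idea using only the
# standard library (illustrative only, not the transformers implementation):
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, attrs in self._import_structure.items():
#                 if attr in attrs:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")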
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
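# Quick sanity check for the schedule above (illustrative, assumes only torch):
# with the default cosine transform, betas_for_alpha_bar(5) should return five
# betas in (0, max_beta] that increase monotonically towards max_beta, e.g.
#
#     betas = betas_for_alpha_bar(5)
#     assert betas.shape == (5,)
#     assert bool((betas > 0).all()) and bool((betas <= 0.999).all())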
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output,
        timestep,
        sample,
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples,
        noise,
        timesteps,
    ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
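# Typical usage of a two-step (KDPM2-style) scheduler in a denoising loop --
# a minimal sketch, assuming a diffusers-style `unet` and `scheduler` object:
#
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     sample = torch.randn(latent_shape, device="cuda") * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample
#
# Each visible denoising step internally alternates between a first-order Euler
# step and a second-order correction (`state_in_first_order` flips as
# `self.sample` is stored and cleared), which is why `set_timesteps` interleaves
# the interpolated sigmas and timesteps above.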
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase: Optional[Any] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Tuple = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase: Dict = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if k in (0.04, 0.06):
_lowercase : Optional[Any] = k
_lowercase : Optional[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ):
return str(self.k )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 )
_lowercase , _lowercase : Dict = img.shape
_lowercase : list[list[int]] = []
_lowercase : int = img.copy()
_lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB )
_lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ )
_lowercase : Optional[int] = dx**2
_lowercase : Optional[Any] = dy**2
_lowercase : Optional[Any] = dx * dy
_lowercase : List[str] = 0.04
_lowercase : Optional[Any] = self.window_size // 2
for y in range(UpperCAmelCase_ ,h - offset ):
for x in range(UpperCAmelCase_ ,w - offset ):
_lowercase : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : Union[str, Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowercase : int = (wxx * wyy) - (wxy**2)
_lowercase : Union[str, Any] = wxx + wyy
_lowercase : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3)
UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the digit string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
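# Worked example with a shorter window (illustrative; the solution above uses 13):
# in "1027839564" the products of 3 adjacent digits are 0, 0, 112, 168, 216, 135,
# 270 and 120, so the greatest product is 9 * 5 * 6 = 270.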
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
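# Example invocation (hypothetical script name and flags for the wrapped script):
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the training script as a module, rewrites sys.argv to
# append `--tpu_num_cores`, and lets torch_xla fork one process per TPU core,
# each of which enters the script's `_mp_fn` entry point.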
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(waveform, dtype=np.float32) for waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
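# The normalization applied above is plain per-utterance standardization over
# the unpadded region, with the padded tail reset to `padding_value`. A quick
# standalone check (illustrative, numpy only):
#
#     x = np.concatenate([np.random.randn(50) * 3 + 7, np.zeros(14)])  # padded tail
#     mask = np.concatenate([np.ones(50), np.zeros(14)])[None, :]
#     (normed,) = SpeechT5FeatureExtractor.zero_mean_unit_var_norm([x], mask)
#     assert abs(normed[:50].mean()) < 1e-6       # zero mean over the real samples
#     assert abs(normed[:50].var() - 1.0) < 1e-2  # (near) unit variance
#     assert (normed[50:] == 0.0).all()           # padding left at padding_value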
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ,A_ : int ) -> Union[str, Any]:
A = n
A = [None] * self.n
A = 0 # index of the first element
A = 0
A = 0
def __len__( self : int ) -> int:
return self.size
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.size == 0
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
A = data
A = (self.rear + 1) % self.n
self.size += 1
return self
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
A = self.array[self.front]
A = None
A = (self.front + 1) % self.n
self.size -= 1
return temp | 74 | 0 |
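# Example usage (a small smoke test for the queue above):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue chains; queue is now full
    assert len(queue) == 3 and queue.first() == "a"
    assert queue.dequeue() == "a"                 # frees a slot at the front
    queue.enqueue("d")                            # rear wraps around and reuses it
    assert [queue.dequeue() for _ in range(len(queue))] == ["b", "c", "d"]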
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` from ``starting_point`` onwards by the Newton-Raphson method."""
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 138 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
_snake_case : int = 1_0_0_0_0
_snake_case : Optional[List[str]] = None
_snake_case : Optional[datasets.Features] = None
class __lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_snake_case : Optional[int] = ParquetConfig
def snake_case__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
_UpperCamelCase = data_files
if isinstance(snake_case__ , snake_case__ ):
_UpperCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase = [dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
_UpperCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_UpperCamelCase = [dl_manager.iter_files(snake_case__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case__ ):
with open(snake_case__ , '''rb''' ) as f:
_UpperCamelCase = datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={'''files''': files} ) )
return splits
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : pa.Table ) -> Tuple:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_UpperCamelCase = table_cast(snake_case__ , self.info.features.arrow_schema )
return pa_table
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
with open(snake_case__ , '''rb''' ) as f:
_UpperCamelCase = pq.ParquetFile(snake_case__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_UpperCamelCase = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(snake_case__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" )
raise
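# Minimal usage sketch: the builder above is what `datasets.load_dataset` dispatches to
# for parquet files. The data file path below is a hypothetical placeholder.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
#     print(ds["train"].features)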
| 324 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
A__ : Any = True
except (ImportError, ModuleNotFoundError):
A__ : str = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _snake_case ( lowerCamelCase__ : str ) -> str:
    lowerCamelCase__ = re.sub("<n>", "", lowerCamelCase__)  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCamelCase__ ) )
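# Minimal usage sketch of the splitter above (`_snake_case` is the function defined in
# this file); assumes the punkt download at import time succeeded.
if __name__ == "__main__":
    sample = "First sentence. Second sentence."
    print(_snake_case(sample))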
| 144 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
_snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase ( lowerCAmelCase__ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : int ) -> Any:
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_a : str = {"""source""": """What is love ?""", """target""": """life"""}
_a : Optional[int] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_a : Optional[Any] = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(_SCREAMING_SNAKE_CASE , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] = "pytorch" ) -> List[str]:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """output""" )
_a : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """data""" )
self._create_dummy_data(data_dir=_SCREAMING_SNAKE_CASE )
        _a : Dict = f"""--data_dir {data_dir} --output_dir {output_dir} --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence --do_train --do_predict --n_val -1 --val_check_interval 1.0 --train_batch_size 2 --eval_batch_size 1 --max_source_length 25 --max_target_length 25 --val_max_target_length 25 --test_max_target_length 25 --label_smoothing 0.1 --dropout 0.1 --attention_dropout 0.1 --weight_decay 0.001 --adam_epsilon 1e-08 --max_grad_norm 0.1 --lr_scheduler polynomial --learning_rate 3e-04 --num_train_epochs 1 --warmup_steps 4 --gradient_accumulation_steps 1 --distributed-port 8787 --use_dummy_dataset 1 --distributed_retriever {distributed_retriever}""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
_a : Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=self.get_env() )
_a : int = os.path.join(_SCREAMING_SNAKE_CASE , """metrics.json""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
_a : int = json.load(_SCREAMING_SNAKE_CASE )
return result
@require_torch_gpu
def _lowercase ( self : Dict ) -> Tuple:
_a : List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def _lowercase ( self : Dict ) -> List[str]:
_a : List[Any] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def _lowercase ( self : str ) -> Any:
_a : Any = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Tuple = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 355 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 1_6 ):
'''simple docstring'''
_a : str = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_a : Dict = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
        _a : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : Tuple = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : int = 1_6
elif accelerator.mixed_precision != "no":
_a : int = 8
else:
_a : str = None
return tokenizer.pad(
UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_a : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
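# Illustrative note on the `pad_to_multiple_of` choice above: tensor cores are fastest
# when padded sequence lengths are multiples of 8 (fp16) or 16 (fp8). A minimal sketch
# of the rounding this induces (numbers are examples only):
#
#     def round_up(length, multiple):
#         return ((length + multiple - 1) // multiple) * multiple
#
#     round_up(37, 8)   # -> 40
#     round_up(37, 16)  # -> 48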
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase__ ) == "1":
_a : str = 2
# Initialize accelerator
_a : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Any = config["""lr"""]
_a : Union[str, Any] = int(config["""num_epochs"""] )
_a : str = int(config["""seed"""] )
_a : List[Any] = int(config["""batch_size"""] )
_a : Tuple = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_a : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_a : str = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase__ )
_a , _a : Optional[int] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[str] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_a : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Optional[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Optional[Any] = model(**UpperCamelCase__ )
_a : str = outputs.loss
_a : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Dict = model(**UpperCamelCase__ )
_a : Optional[Any] = outputs.logits.argmax(dim=-1 )
_a , _a : int = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(UpperCamelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_a : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ )
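# Sketch of the simpler alternative mentioned in the comments above: with
# `Accelerator.gather_for_metrics`, the duplicated samples that distributed samplers add
# to the last batch are dropped automatically, so the manual `samples_seen` bookkeeping
# disappears. Illustrative only; the names assume the evaluation loop above.
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)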
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_a : Optional[Any] = parser.parse_args()
_a : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 324 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
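# Example invocation via python-fire (the script file name is a hypothetical placeholder):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en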
| 20 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : str = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 20 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 368 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = 'Hello world! cécé herlolip'
UpperCAmelCase_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ = BertAbsConfig(
temp_dir=""".""" , finetune_bert=SCREAMING_SNAKE_CASE__ , large=SCREAMING_SNAKE_CASE__ , share_emb=SCREAMING_SNAKE_CASE__ , use_bert_emb=SCREAMING_SNAKE_CASE__ , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : storage )
UpperCAmelCase__ = AbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) , SCREAMING_SNAKE_CASE__ )
original.eval()
UpperCAmelCase__ = BertAbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
UpperCAmelCase__ = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ = encoder_input_ids
UpperCAmelCase__ = decoder_input_ids
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ = original(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = original.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = new_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = new_model.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
UpperCAmelCase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 61 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "efficientnet"
def __init__( self : Optional[Any] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 6_0_0 , lowerCAmelCase_ : float = 2.0 , lowerCAmelCase_ : float = 3.1 , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase_ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , lowerCAmelCase_ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , lowerCAmelCase_ : List[int] = [] , lowerCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase_ : float = 0.25 , lowerCAmelCase_ : str = "swish" , lowerCAmelCase_ : int = 2_5_6_0 , lowerCAmelCase_ : str = "mean" , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : float = 0.001 , lowerCAmelCase_ : float = 0.99 , lowerCAmelCase_ : float = 0.5 , lowerCAmelCase_ : float = 0.2 , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_)
lowercase_ = num_channels
lowercase_ = image_size
lowercase_ = width_coefficient
lowercase_ = depth_coefficient
lowercase_ = depth_divisor
lowercase_ = kernel_sizes
lowercase_ = in_channels
lowercase_ = out_channels
lowercase_ = depthwise_padding
lowercase_ = strides
lowercase_ = num_block_repeats
lowercase_ = expand_ratios
lowercase_ = squeeze_expansion_ratio
lowercase_ = hidden_act
lowercase_ = hidden_dim
lowercase_ = pooling_type
lowercase_ = initializer_range
lowercase_ = batch_norm_eps
lowercase_ = batch_norm_momentum
lowercase_ = dropout_rate
lowercase_ = drop_connect_rate
lowercase_ = sum(lowerCAmelCase_) * 4
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = version.parse("1.11" )
@property
def _UpperCAmelCase ( self : int):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return 1E-5
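# Sketch of how `width_coefficient` and `depth_divisor` interact in EfficientNet-style
# compound scaling (rounding rule from the original paper's reference code; illustrative,
# not part of this module):
#
#     def round_filters(filters, width_coefficient, depth_divisor=8):
#         scaled = filters * width_coefficient
#         new = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
#         if new < 0.9 * scaled:  # never round down by more than 10%
#             new += depth_divisor
#         return int(new)
#
#     round_filters(32, 1.4)  # -> 48 for the b4-style width coefficient in the table above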
| 136 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "encodec"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase_ : Tuple=2_4_0_0_0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=1_2_8 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : Dict=[8, 5, 4, 2] , lowerCAmelCase_ : Optional[Any]="weight_norm" , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="reflect" , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=1.0 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
lowercase_ = target_bandwidths
lowercase_ = sampling_rate
lowercase_ = audio_channels
lowercase_ = normalize
lowercase_ = chunk_length_s
lowercase_ = overlap
lowercase_ = hidden_size
lowercase_ = num_filters
lowercase_ = num_residual_layers
lowercase_ = upsampling_ratios
lowercase_ = norm_type
lowercase_ = kernel_size
lowercase_ = last_kernel_size
lowercase_ = residual_kernel_size
lowercase_ = dilation_growth_rate
lowercase_ = use_causal_conv
lowercase_ = pad_mode
lowercase_ = compress
lowercase_ = num_lstm_layers
lowercase_ = trim_right_ratio
lowercase_ = codebook_size
lowercase_ = codebook_dim if codebook_dim is not None else hidden_size
lowercase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
super().__init__(**lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
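# Worked sketch of the derived properties above, assuming the 24 kHz defaults
# (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2], target_bandwidths up to 24.0)
# and hypothetical chunk_length_s=1.0, overlap=0.01 (both default to None here):
#   chunk_length   = int(1.0 * 24000)                  = 24000 samples
#   chunk_stride   = max(1, int((1.0 - 0.01) * 24000)) = 23760 samples
#   hop_length     = 8 * 5 * 4 * 2 = 320  ->  frame_rate = ceil(24000 / 320) = 75 Hz
#   num_quantizers = int(1000 * 24.0 // (75 * 10))     = 32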
| 136 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
__lowercase = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 226 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
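# Note: each call recurses three times, so depth d produces 3**d triangles at the
# deepest level; turtle rendering slows down quickly for depths much beyond 5.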
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
__lowercase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
__lowercase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 226 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE_ : CLIPSegProcessor , SCREAMING_SNAKE_CASE_ : AutoencoderKL , SCREAMING_SNAKE_CASE_ : CLIPTextModel , SCREAMING_SNAKE_CASE_ : CLIPTokenizer , SCREAMING_SNAKE_CASE_ : UNetaDConditionModel , SCREAMING_SNAKE_CASE_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE_ : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE_ : CLIPImageProcessor , ) -> List[str]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
A: Tuple = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ )
A: Any = dict(scheduler.config )
A: int = 1
A: Optional[Any] = FrozenDict(SCREAMING_SNAKE_CASE_ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
A: List[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , SCREAMING_SNAKE_CASE_ , standard_warn=SCREAMING_SNAKE_CASE_ )
A: str = dict(scheduler.config )
A: List[str] = True
A: Any = FrozenDict(SCREAMING_SNAKE_CASE_ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE_ , segmentation_processor=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, int]] = "auto" ) -> Optional[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A: Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
A: Union[str, Any] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : int , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , SCREAMING_SNAKE_CASE_ : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 5_12 , SCREAMING_SNAKE_CASE_ : int = 5_12 , SCREAMING_SNAKE_CASE_ : int = 50 , SCREAMING_SNAKE_CASE_ : float = 7.5 , SCREAMING_SNAKE_CASE_ : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = 1 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Tuple:
'''simple docstring'''
A: int = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
A: List[Any] = self.segmentation_model(**SCREAMING_SNAKE_CASE_ )
A: List[Any] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
A: Any = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
A: List[Any] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , mask_image=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , )
| 319 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
UpperCamelCase = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict:
A: Tuple = EfficientNetConfig()
A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim''']
A: Optional[int] = CONFIG_MAP[model_name]['''width_coef''']
A: str = CONFIG_MAP[model_name]['''depth_coef''']
A: Dict = CONFIG_MAP[model_name]['''image_size''']
A: str = CONFIG_MAP[model_name]['''dropout_rate''']
A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding''']
A: Optional[Any] = '''huggingface/label-files'''
A: List[str] = '''imagenet-1k-id2label.json'''
A: Dict = 1_0_0_0
A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()}
A: int = idalabel
A: Tuple = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE( ) -> Any:
A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
A: List[str] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , )
return preprocessor
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
A: List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
A: List[str] = sorted(set(__lowercase ) )
A: Dict = len(__lowercase )
A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )}
A: Optional[int] = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
A: Union[str, Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
A: str = '''efficientnet.''' + item[1]
A: int = '''classifier.weight'''
A: Tuple = '''classifier.bias'''
return key_mapping
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
A: Union[str, Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
A: List[str] = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
A: List[Any] = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
A: Optional[Any] = torch.from_numpy(np.transpose(__lowercase ) )
else:
A: Any = torch.from_numpy(__lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowercase )
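# Minimal sketch of the layout conversion above: TF stores conv kernels as HWIO
# (height, width, in_channels, out_channels) while PyTorch expects OIHW, hence
# permute(3, 2, 0, 1); TF depthwise kernels are (H, W, in_channels, multiplier), hence
# permute(2, 3, 0, 1). Shapes below are illustrative.
#
#     import numpy as np, torch
#     tf_kernel = np.zeros((3, 3, 16, 32))                        # HWIO
#     pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
#     assert tuple(pt_kernel.shape) == (32, 16, 3, 3)             # OIHW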
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
A: Optional[int] = model_classes[model_name](
include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , )
A: List[str] = original_model.trainable_variables
A: Optional[Any] = original_model.non_trainable_variables
A: Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A: int = param.numpy()
A: Tuple = list(tf_params.keys() )
# Load HuggingFace model
A: Dict = get_efficientnet_config(__lowercase )
A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval()
A: Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
A: int = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
A: List[Any] = convert_image_processor(__lowercase )
A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
A: str = hf_model(**__lowercase )
A: List[Any] = outputs.logits.detach().numpy()
# Original model inference
A: Any = False
A: List[Any] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A: str = image.img_to_array(__lowercase )
A: Dict = np.expand_dims(__lowercase , axis=0 )
A: Any = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
A: int = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
UpperCamelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
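# Worked example of the pruning rule above for nums=[3, 34, 4, 12, 5, 2], max_sum=9:
# the branch that starts with 34 has sum(path)=34 > 9 and is cut immediately, while the
# path 3 -> 4 -> 2 reaches exactly 9 and is recorded; [4, 5] is the other solution.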
_UpperCamelCase : Tuple = [3, 34, 4, 12, 5, 2]
_UpperCamelCase : List[Any] = 9
_UpperCamelCase : List[str] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 364 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] = '▁'
_UpperCamelCase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
_UpperCamelCase : Optional[int] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
_UpperCamelCase : List[str] = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
_UpperCamelCase : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |   8  |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        In legacy mode: no prefix and suffix = [eos, src_lang_code].
        In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        In legacy mode: no prefix and suffix = [eos, tgt_lang_code].
        In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
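# Illustrative usage sketch (added; assumes a local SentencePiece model file at
# the hypothetical path "sentencepiece.bpe.model"):
#
#     tokenizer = NllbTokenizer("sentencepiece.bpe.model", src_lang="eng_Latn", tgt_lang="fra_Latn")
#     batch = tokenizer.prepare_seq2seq_batch(["Hello world"], tgt_texts=["Bonjour le monde"])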
| 186 | 0 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
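# Example invocation (hypothetical paths, added for illustration):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522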
| 42 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """Decode the ciphertext with the candidate key by XOR-ing each byte against
    the cycled key. Return None as soon as a decoded character is invalid."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every 3-letter lowercase key and keep decodings made of valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain the given common word."""
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    """Return the sum of the ASCII values of the decoded message."""
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
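    # Illustrative sanity check (added, not part of the original solution): XOR
    # with a cycled key is an involution, so decoding an encoding recovers the text.
    sample = "the quick brown fox"
    demo_key = (ord("a"), ord("b"), ord("c"))
    encoded = [ord(ch) ^ k for ch, k in zip(sample, cycle(demo_key))]
    assert try_key(encoded, demo_key) == sample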
| 42 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 226 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
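# Worked example of the mapping above: PyTorch layer 0 (depthwise) is fed from
# "MobilenetV1/Conv2d_1_depthwise/", layer 1 (pointwise) from
# "MobilenetV1/Conv2d_1_pointwise/", and so on through layer 25, which is fed
# from "MobilenetV1/Conv2d_13_pointwise/".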
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
__lowercase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowercase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(
        self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(
        self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, )
| 226 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
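# Minimal usage sketch (added for illustration, not part of the original module):
#
#     config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
#     onnx_config = Data2VecTextOnnxConfig(config)
#     print(onnx_config.inputs)  # dynamic axes for "input_ids" and "attention_mask"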
| 49 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
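# Example invocation (hypothetical paths, added for illustration):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird/model.ckpt-300000 \
#       --big_bird_config_file bigbird/config.json \
#       --pytorch_dump_path bigbird_pytorch --is_trivia_qa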
| 49 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 358 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f'{key}.dat')
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 296 | 0 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication; assumes square matrices whose side is a power of 2."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f'Matrix A: {matrix1}\n'
            f'Matrix B: {matrix2}'
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    # both already 2x2: multiply directly
    if dimension1 == (2, 2) and dimension2 == (2, 2):
        return default_matrix_multiplication(matrix1, matrix2)
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Pad copies of the inputs (copying avoids mutating the caller's matrices)
    # with zeros so that both become maxim x maxim, where maxim is a power of 2.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
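    # Illustrative sanity check (added): Strassen must agree with a naive
    # triple-loop product on a small non-square case.
    lhs = [[1, 2], [3, 4], [5, 6]]
    rhs = [[7, 8, 9], [10, 11, 12]]
    naive = [
        [sum(lhs[i][k] * rhs[k][j] for k in range(2)) for j in range(3)]
        for i in range(3)
    ]
    assert strassen(lhs, rhs) == naive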
| 9 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
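# Quick check (added, not in the original script): the sequence begins
# 1, 1, 2, 5, 14, 42, ...
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]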
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
    doctest.testmod()
 | 97 | 0 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation algorithm for minimum vertex cover."""
    queue: list = []
    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # using the heapq module the queue will be filled like a priority queue.
    # heapq implements a min-heap, so -1 * len(value) is pushed to get max-first order.
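    # e.g. a vertex with 3 neighbours is pushed as [-3, (vertex, neighbours)], so
    # the min-heap pops it before a vertex with only 2 neighbours.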
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 348 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_1, array_2)}')
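    # Illustrative checks (added, not in the original script):
    assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0
    assert median_of_two_arrays([1.0, 3.0], [2.0, 4.0]) == 2.5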
| 348 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {'input_ids': input_ids_2, 'mems': mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {'input_ids': input_ids_1, 'labels': lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {'input_ids': input_ids_2, 'mems': mems_1, 'labels': lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_1}
        return config, inputs_dict
@require_tf
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
snake_case__ = () if is_tf_available() else ()
snake_case__ = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = TFTransfoXLModelTester(self )
UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,d_embed=37 )
def __lowerCAmelCase ( self : int ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Tuple ):
self.model_tester.set_seed()
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
self.model_tester.set_seed()
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x ,tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def __lowerCAmelCase ( self : List[str] ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFTransfoXLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def __lowerCAmelCase ( self : str ):
pass
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
UpperCAmelCase__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCAmelCase__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCAmelCase__ = model.generate(lowerCamelCase__ ,max_length=200 ,do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() ,lowerCamelCase__ )
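# Sketch of the memory mechanism the assertions above check (assumed usage,
# shown as comments because it needs the pretrained checkpoint): mems returned
# for one segment are fed back so the next segment attends to cached states.
#
#   model = TFTransfoXLModel.from_pretrained("transfo-xl-wt103")
#   hidden_1, mems_1 = model(segment_1_ids).to_tuple()              # mems start empty
#   hidden_2, mems_2 = model(segment_2_ids, mems=mems_1).to_tuple()
#   # each mem has shape (mem_len, batch_size, hidden_size), one per layer
# `segment_1_ids` / `segment_2_ids` are hypothetical token-id tensors.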
| 98 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
    def __lowerCAmelCase ( self ,model_id ,input_texts ,batch_size : int = 16 ,add_start_token : bool = True ,device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts ,add_special_tokens=False ,padding=True ,truncation=True ,max_length=max_tokenized_len ,return_tensors='pt' ,return_attention_mask=True ,).to(device )
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )
        for start_index in logging.tqdm(range(0 ,len(encoded_texts ) ,batch_size ) ):
            end_index = min(start_index + batch_size ,len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() ,dtype=torch.int64 ).to(device ), attn_mask] ,dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch ,attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1 ,2 ) ,shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 98 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Optional[Any]=6 , lowercase_ : int=17 , lowercase_ : List[Any]=23 , lowercase_ : List[Any]=11 , lowercase_ : Dict=True , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = act_dim
_UpperCamelCase = state_dim
_UpperCamelCase = hidden_size
_UpperCamelCase = max_length
_UpperCamelCase = is_training
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
_UpperCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000)
_UpperCamelCase = random_attention_mask((self.batch_size, self.seq_length))
_UpperCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __UpperCAmelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict , ) -> int:
"""simple docstring"""
_UpperCamelCase = DecisionTransformerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length * 3 as there are 3 modalities: states, returns and actions
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (DecisionTransformerModel,) if is_torch_available() else ()
__A = ()
__A = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__A = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = DecisionTransformerModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@slow
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DecisionTransformerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(lowercase_)] , lowercase_)
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
        NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10 # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32) # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32).reshape(1 , 1 , 1)
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32)
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32)
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long).reshape(1 , 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device)] , dim=1)
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device)] , dim=1)
            attention_mask = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
            with torch.no_grad():
                _ , action_pred , _ = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
            state , reward , _ , _ = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim).to(device=torch_device , dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long) * (step + 1)] , dim=1)
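# A tiny illustration (not from the test file) of the token layout behind the
# `seq_length * 3` assertion above: Decision Transformer interleaves one
# return-to-go, one state and one action token per timestep.
sequence = [(modality, t) for t in range(2) for modality in ("R", "s", "a")]
assert len(sequence) == 3 * 2  # three modality tokens per timestep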
| 63 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = 30
_UpperCamelCase = self.seq_length + self.mem_len
_UpperCamelCase = 15
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = [10, 50, 80]
_UpperCamelCase = 32
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = 8
_UpperCamelCase = 128
_UpperCamelCase = 2
_UpperCamelCase = 2
_UpperCamelCase = None
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = 3
_UpperCamelCase = self.vocab_size - 1
_UpperCamelCase = 0.01
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
random.seed(self.seed)
tf.random.set_seed(self.seed)
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : str , lowercase_ : Dict , lowercase_ : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLLMHeadModel(lowercase_)
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
_UpperCamelCase , _UpperCamelCase = model([input_ids_a, mems_a]).to_tuple()
_UpperCamelCase = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_UpperCamelCase , _UpperCamelCase = model(lowercase_).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict) -> str:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLForSequenceClassification(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__A = () if is_tf_available() else ()
__A = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Any , lowercase_ : List[str]) -> Any:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , d_embed=37)
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
pass
@slow
def __UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFTransfoXLModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
pass
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
_UpperCamelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCamelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCamelCase = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_)
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_)
| 63 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch( tf_checkpoint_path : str , config_path : str , pytorch_dump_path : str ):
    """simple docstring"""
    def get_masked_lm_array(name : str ):
        full_name = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name : str ):
        full_name = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index : int , name : str ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index : int , name : str , original_shape ):
        full_name = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(F'''Loading model based on config from {config_path}...''' )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer: BertLayer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/kernel" , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , "_query_dense/bias" , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/kernel" , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , "_key_dense/bias" , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/kernel" , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , "_value_dense/bias" , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/kernel" , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , "_output_dense/bias" , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/gamma" )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_attention_layer_norm/beta" )
        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , "_intermediate_dense/kernel" )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , "_intermediate_dense/bias" )
        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , "_output_dense/kernel" )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , "_output_dense/bias" )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , "_output_layer_norm/gamma" )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , "_output_layer_norm/beta" )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings" )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings" )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma" )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta" )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel" )
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias" )
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma" )
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta" )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table" )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel" )
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias" )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print("Model conversion was done successfully!" )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
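# Example invocation of this conversion script (all three paths are
# placeholders; a real run needs a TF2 token-dropping BERT checkpoint):
#
#   python convert_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model_dir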
| 169 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowerCAmelCase : int = get_logger(__name__)
_lowerCAmelCase : Any = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase :
@add_start_docstrings(lowerCamelCase )
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray ) -> jnp.ndarray:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase ( lowerCAmelCase ):
@add_start_docstrings(lowerCamelCase )
def __call__( self :List[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int , **lowerCamelCase :str ) -> jnp.ndarray:
for processor in self:
UpperCAmelCase__ = inspect.signature(processor.__call__ ).parameters
if len(lowerCamelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
else:
UpperCAmelCase__ = processor(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :str , lowerCamelCase :float ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCAmelCase__ = temperature
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = scores / self.temperature
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[int] , lowerCamelCase :float , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> Union[str, Any]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCAmelCase__ = top_p
UpperCAmelCase__ = filter_value
UpperCAmelCase__ = min_tokens_to_keep
def __call__( self :Tuple , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , scores.shape[-1] )
UpperCAmelCase__ = jnp.full_like(lowerCamelCase , self.filter_value )
UpperCAmelCase__ = jax.nn.softmax(lowerCamelCase , axis=-1 ).cumsum(axis=-1 )
UpperCAmelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCAmelCase__ = jnp.roll(lowerCamelCase , 1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase )
# min tokens to keep
UpperCAmelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase )
UpperCAmelCase__ = jnp.where(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jax.lax.sort_key_val(lowerCamelCase , lowerCamelCase )[-1]
return next_scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Union[str, Any] , lowerCamelCase :int , lowerCamelCase :float = -float("Inf" ) , lowerCamelCase :int = 1 ) -> List[str]:
if not isinstance(lowerCamelCase , lowerCamelCase ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = filter_value
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ , UpperCAmelCase__ = scores.shape
UpperCAmelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCAmelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCAmelCase__ , UpperCAmelCase__ = lax.top_k(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.broadcast_to((jnp.arange(lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCAmelCase__ = topk_scores.flatten()
UpperCAmelCase__ = topk_indices.flatten() + shift
UpperCAmelCase__ = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase )
UpperCAmelCase__ = next_scores_flat.reshape(lowerCamelCase , lowerCamelCase )
return next_scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Any , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = bos_token_id
def __call__( self :Optional[int] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Tuple , lowerCamelCase :int , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = max_length
UpperCAmelCase__ = eos_token_id
def __call__( self :Union[str, Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = jnp.full(scores.shape , -float("inf" ) )
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :int , lowerCamelCase :int ) -> Tuple:
if not isinstance(lowerCamelCase , lowerCamelCase ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(lowerCamelCase , lowerCamelCase ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCAmelCase__ = min_length
UpperCAmelCase__ = eos_token_id
def __call__( self :int , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
UpperCAmelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :int , lowerCamelCase :List[str] , lowerCamelCase :str ) -> Any:
UpperCAmelCase__ = list(lowerCamelCase )
UpperCAmelCase__ = begin_index
def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , lowerCamelCase :List[str] , lowerCamelCase :int ) -> List[Any]:
UpperCAmelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCAmelCase__ = jnp.where(lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , lowerCamelCase )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :List[Any] , lowerCamelCase :list ) -> Tuple:
UpperCAmelCase__ = list(lowerCamelCase )
def __call__( self :Optional[Any] , lowerCamelCase :jnp.ndarray , lowerCamelCase :jnp.ndarray , lowerCamelCase :int ) -> jnp.ndarray:
UpperCAmelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _UpperCamelCase ( lowerCAmelCase ):
    def __init__( self :List[Any] , force_token_map :list ) -> Union[str, Any]:
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self :Optional[int] , input_ids :jnp.ndarray , scores :jnp.ndarray , cur_len :int ) -> jnp.ndarray:
        def _force_token(generation_idx ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float("inf" )
            updated_scores = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updated_scores , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
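# Small illustration (assumed inputs) of the force_token_map -> array
# conversion above: indexes without a forced token keep -1, so the XLA-safe
# branch can test `force_token_array[cur_len] >= 0` instead of a dict lookup.
import jax.numpy as jnp
demo_map = {1: 50259, 2: 50359}  # hypothetical Whisper-style forced token ids
demo_array = jnp.ones(max(demo_map.keys()) + 1, dtype=jnp.int32) * -1
for idx, tok in demo_map.items():
    demo_array = demo_array.at[idx].set(tok)
# demo_array -> Array([-1, 50259, 50359], dtype=int32)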
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :Optional[Any] , lowerCamelCase :List[Any] , lowerCamelCase :Optional[int] , lowerCamelCase :Tuple ) -> Dict:
UpperCAmelCase__ = generate_config.eos_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id
UpperCAmelCase__ = generate_config.no_timestamps_token_id + 1
UpperCAmelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase , "max_initial_timestamp_index" ):
UpperCAmelCase__ = generate_config.max_initial_timestamp_index
else:
UpperCAmelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCAmelCase__ = model_config.vocab_size
def __call__( self :List[str] , lowerCamelCase :str , lowerCamelCase :int , lowerCamelCase :Any ) -> Union[str, Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCAmelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowerCamelCase :int , lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowerCamelCase , )
UpperCAmelCase__ = jnp.where((cur_len - self.begin_index) < 2 , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowerCamelCase , lowerCamelCase , )
return jnp.where(
lowerCamelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(cur_len == self.begin_index , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowerCamelCase , )
UpperCAmelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
UpperCAmelCase__ = jnp.where(
lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , lowerCamelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCAmelCase__ = jax.nn.log_softmax(lowerCamelCase , axis=-1 )
def handle_cumulative_probs(lowerCamelCase :Optional[int] , lowerCamelCase :Optional[Any] ):
UpperCAmelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCAmelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , lowerCamelCase , )
UpperCAmelCase__ = jax.vmap(lowerCamelCase )(lowerCamelCase , lowerCamelCase )
return scores
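# Minimal sketch of chaining the processors above. The classes are defined
# under placeholder names in this file, so the upstream `transformers` names
# are used here (an assumption about the intended originals): temperature
# scaling followed by top-k filtering over dummy scores.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper
demo_processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
)
demo_input_ids = jnp.array([[0]])
demo_scores = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))
filtered = demo_processors(demo_input_ids, demo_scores, cur_len=1)
# only the two highest-scoring tokens keep finite values in `filtered`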
| 169 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def UpperCamelCase ( self,seed=0 ):
        image = floats_tensor((1, 3, 128, 128),rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def UpperCamelCase ( self ):
A__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_A )
A__ = self.get_dummy_inputs()
A__ = pipe(**_A ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A__ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCamelCase ( self ):
A__ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint,provider='''CPUExecutionProvider''' )
A__ = PNDMScheduler.from_config(pipe.scheduler.config,skip_prk_steps=_A )
pipe.set_progress_bar_config(disable=_A )
A__ = self.get_dummy_inputs()
A__ = pipe(**_A ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A__ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
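
# --- Editor's note: hedged usage sketch condensing the tests above into one function.
# Checkpoint, image URL and prompt come from those tests; the CPU provider is used so
# the sketch runs without CUDA. ---
def _img2img_example():
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg"
    ).resize((768, 512))
    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
    )
    generator = np.random.RandomState(0)
    return pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=generator,
        output_type="np",
    ).images[0]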
| 363 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["labels"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
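
# --- Editor's note: hedged usage sketch; the checkpoint name is illustrative. ---
def _pix2struct_processor_example(image, question):
    from transformers import Pix2StructProcessor  # public name of the class above

    processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
    # For a VQA checkpoint, text is routed through the image processor as header_text
    # (see the `is_vqa` branch above); max_patches matches the default in __call__.
    return processor(images=image, text=question, return_tensors="pt", max_patches=2048)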
| 39 | 0 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
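
# --- Editor's note: hedged usage example. The function names above were restored from
# the math: the second function is x * sigmoid(x), also known as swish / SiLU. ---
def _silu_demo():
    v = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(v))              # approx [0.2689, 0.5, 0.7311]
    print(sigmoid_linear_unit(v))  # approx [-0.2689, 0.0, 0.7311]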
| 155 |
"""simple docstring"""
def heaps(arr: list) -> list:
    '''simple docstring'''
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
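
# --- Editor's note: usage example. Heap's algorithm produces each new permutation
# with a single swap, so heaps([1, 2, 3]) yields all 3! = 6 orderings. ---
def _heaps_demo():
    assert heaps([1, 2, 3]) == [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]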
| 155 | 1 |
'''simple docstring'''
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {'f1': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results['f1'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric(\"f1\")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results['f1'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n        >>> print(round(results['f1'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n        >>> print(round(results['f1'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 21 | '''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase : Union[str, Any] = "src/transformers"
_lowercase : str = "docs/source/en"
_lowercase : Union[str, Any] = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
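
# --- Editor's note: small demo of the helper used by the lookup loop above. ---
def _camel_case_split_demo():
    # The loop strips one trailing word per iteration until a known model prefix matches:
    # camel_case_split("BertForSequenceClassification") -> ['Bert', 'For', 'Sequence', 'Classification']
    return camel_case_split("BertForSequenceClassification")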
| 21 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
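
# --- Editor's note: `random_input_ids` builds a (batch_size, sequence_length) int32
# tensor of ids drawn uniformly from [0, vocab_size), e.g.
# random_input_ids(2, 8, 100).shape == TensorShape([2, 8]). ---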
class TensorFlowBenchmark(Benchmark):
    """simple docstring"""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
return "N/A", None | 81 | import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
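
# --- Editor's note: ipex.optimize specializes kernels for the traced UNet inputs
# (sample, timestep, encoder hidden states); the try/except above falls back to
# shape-agnostic optimization when tracing with `sample_input` is unsupported. ---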
| 348 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096,
        activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True,
        scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
        bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
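
# --- Editor's note: hedged usage sketch of the config above. ---
def _trocr_config_demo():
    from transformers import TrOCRConfig  # public name of the class above

    config = TrOCRConfig(decoder_layers=6)
    # attribute_map routes the generic names onto the decoder fields
    return config.hidden_size == config.d_model  # True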
| 63 | # Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct: dict):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
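
# --- Editor's note: hedged usage sketch; in practice this formatter is selected
# through `Dataset.with_format("torch")` rather than instantiated directly. ---
def _torch_format_demo():
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
    return ds[0]["x"]  # tensor([1, 2]), produced by the formatter above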
| 63 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        '''simple docstring'''
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None,
        clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True,
        save_intermediate=False, show_intermediate=False, make_grid=False,
    ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        '''simple docstring'''
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z
    def _add_vector(self, transform_vector):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        '''simple docstring'''
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False,
        show_final=True, save_final=True, save_path=None,
    ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    '''simple docstring'''

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None,
        rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> "PIL.Image.Image":
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        '''simple docstring'''
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
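
# --- Editor's note: hedged usage sketch; the checkpoint name is illustrative. ---
def _dpt_processor_demo(image):
    from transformers import DPTImageProcessor  # public name of the class above

    processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
    return processor(images=image, return_tensors="pt")["pixel_values"]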
| 36 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCamelCase = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 350 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}")
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
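    # Illustrative sanity checks (values chosen for this edit, not part of
    # the original snippet):
    assert greatest_common_divisor(121, 11) == 11
    assert gcd_by_iterative(120, 7) == 1
    assert gcd_by_iterative(-36, 24) == 12  # sign is normalised via abs()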
| 339 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """A distributed retriever that delegates retrieval to a set of Ray actors."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
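# Sketch of how this retriever is typically wired up (illustrative only; the
# actor handles normally come from the Ray-based fine-tuning script, and the
# model name below is just an example):
#
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", workers)
#     retriever.init_retrieval()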
| 259 |
def bfs(graph: list, source: int, sink: int, parent: list) -> bool:
    # Return True if the sink can be reached from the source in the residual
    # graph; the augmenting path found is recorded in `parent`.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph: list, source: int, sink: int) -> int:
    # This array is filled by BFS and stores the augmenting path.
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the chosen path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
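# For this classic textbook network (the CLRS flow-network example), the
# maximum flow from node 0 to node 5 is 23, so the call above should print 23.
# Note that ford_fulkerson mutates `graph` in place (it becomes the residual
# graph), so re-running it on the same list would not reproduce the result.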
| 259 | 1 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy 0.5 * m * v**2 of a moving body."""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
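    # Illustrative check (hypothetical values): 0.5 * 10 * 10**2 == 500.0
    assert kinetic_energy(10, 10) == 500.0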
| 358 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random (di)graph on `vertices_number` nodes as an adjacency dict."""
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the reverse edge j -> i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate the complete graph on `vertices_number` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
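    # Illustrative, seeded example (the output of random_graph depends on the
    # random stream; complete_graph is deterministic):
    random.seed(1)
    print(random_graph(4, 0.5))
    print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}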
| 160 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase = '''xvjiarui/stable-diffusion-2-inpainting'''
UpperCAmelCase , UpperCAmelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowercase , safety_checker=lowercase )
UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase = jax.random.PRNGKey(0 )
UpperCAmelCase = 50
UpperCAmelCase = jax.device_count()
UpperCAmelCase = num_samples * [prompt]
UpperCAmelCase = num_samples * [init_image]
UpperCAmelCase = num_samples * [mask_image]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = pipeline.prepare_inputs(lowercase , lowercase , lowercase )
# shard inputs and rng
UpperCAmelCase = replicate(lowercase )
UpperCAmelCase = jax.random.split(lowercase , jax.device_count() )
UpperCAmelCase = shard(lowercase )
UpperCAmelCase = shard(lowercase )
UpperCAmelCase = shard(lowercase )
UpperCAmelCase = pipeline(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , jit=lowercase )
UpperCAmelCase = output.images.reshape(lowercase , 512 , 512 , 3 )
UpperCAmelCase = images[0, 253:256, 253:256, -1]
UpperCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 34 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
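    # Note: pages without an og:image meta tag make the soup.find(...)['content']
    # lookup raise a TypeError; a defensive variant (illustrative only):
    #
    #     tag = soup.find('meta', {'property': 'og:image'})
    #     if tag is None:
    #         raise SystemExit('No og:image meta tag found on the page.')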
| 34 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == "git":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
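# Minimal usage sketch (illustrative; follows the standard transformers config
# pattern rather than anything specific to this file):
#
#     config = GitConfig()
#     print(config.vision_config.image_size)  # 224 by default
#     round_trip = GitConfig.from_dict(config.to_dict())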
| 104 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        import torch

        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt' if labels is None else None, )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch['entity_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
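# Illustrative behaviour of the padding helper above (hypothetical inputs):
#
#     >>> padding_tensor([[1, 2], [3]], -1, "right", 4)
#     [[1, 2, -1, -1], [3, -1, -1, -1]]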
| 104 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's algorithm: return the list of MST edges for the given graph."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
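    # Illustrative session (hypothetical input): for 3 edges entered as
    # "0 1 1", "1 2 2" and "0 2 4", the printed MST is [(0, 1), (1, 2)],
    # i.e. the triangle minus its heaviest edge.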
| 339 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the shortest (minimal) Roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by writing each numeral minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
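    # Worked example (illustrative): "IIIIIIIIIIIIIIII" (16 characters) parses
    # to 16, whose minimal form is "XVI" (3 characters), a saving of 13.
    assert parse_roman_numerals("IIIIIIIIIIIIIIII") == 16
    assert generate_roman_numerals(16) == "XVI"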
| 339 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
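# The _LazyModule indirection above defers the heavy torch/TF imports until an
# attribute is first accessed, e.g. (illustrative):
#
#     from transformers.models.deit import DeiTConfig   # cheap: config only
#     from transformers.models.deit import TFDeiTModel  # triggers the TF import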
| 358 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
snake_case_ : List[Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Dict = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : List[Any] = "sgugger/tiny-distilbert-classification"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
snake_case_ : int = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case_ : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : str = "sshleifer/tiny-gpt2"
snake_case_ : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Optional[Any] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , [config] )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : List[str] = "patrickvonplaten/t5-tiny-random"
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : int = "sshleifer/tiny-gpt2"
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : List[str] = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
snake_case_ : Union[str, Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Dict = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "env.csv" ) ).exists() )
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ : int = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "sequential" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "cumulative" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "current" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , eager_mode=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
snake_case_ : Tuple = TensorFlowBenchmark(_SCREAMING_SNAKE_CASE )
snake_case_ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , "log.txt" ) ).exists() )
| 36 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 178 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 178 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=1 / 255 , lowerCAmelCase__ : int=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_: Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Tuple = batch_size
SCREAMING_SNAKE_CASE_: Tuple = num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE_: Tuple = max_resolution
SCREAMING_SNAKE_CASE_: List[Any] = do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Any = image_mean
SCREAMING_SNAKE_CASE_: Dict = image_std
SCREAMING_SNAKE_CASE_: Tuple = do_rescale
SCREAMING_SNAKE_CASE_: int = rescale_factor
SCREAMING_SNAKE_CASE_: int = do_pad
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=False):
if not batched:
SCREAMING_SNAKE_CASE_: List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_: List[Any] = int(self.size["shortest_edge"] * h / w)
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_: Any = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(self.size["shortest_edge"] * w / h)
else:
SCREAMING_SNAKE_CASE_: int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Dict = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_: int = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
SCREAMING_SNAKE_CASE_: Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[0])[0]
SCREAMING_SNAKE_CASE_: Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Any = DeformableDetrImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = DeformableDetrImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad"))
self.assertTrue(hasattr(lowerCAmelCase__ , "size"))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__)
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : str):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_: str = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Any = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_: Dict = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image and target
SCREAMING_SNAKE_CASE_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: str = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_: str = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE_: Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: str = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: str = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: Tuple = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify orig_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: str = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_: Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: List[Any] = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_: int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
SCREAMING_SNAKE_CASE_: Any = DeformableDetrImageProcessor(format="coco_panoptic")
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Dict = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: List[str] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: Any = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify masks
SCREAMING_SNAKE_CASE_: Tuple = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__)
# verify orig_size
SCREAMING_SNAKE_CASE_: str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
| 127 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict = EfficientNetConfig()
SCREAMING_SNAKE_CASE_: Any = CONFIG_MAP[model_name]["hidden_dim"]
SCREAMING_SNAKE_CASE_: Optional[Any] = CONFIG_MAP[model_name]["width_coef"]
SCREAMING_SNAKE_CASE_: List[Any] = CONFIG_MAP[model_name]["depth_coef"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE_: Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
SCREAMING_SNAKE_CASE_: Dict = CONFIG_MAP[model_name]["dw_padding"]
SCREAMING_SNAKE_CASE_: str = "huggingface/label-files"
SCREAMING_SNAKE_CASE_: str = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_: int = 10_00
SCREAMING_SNAKE_CASE_: int = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_: int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Any = idalabel
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def A_ ( ):
SCREAMING_SNAKE_CASE_: Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_: int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE_: Optional[Any] = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_UpperCAmelCase , )
return preprocessor
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
SCREAMING_SNAKE_CASE_: Optional[Any] = sorted(set(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_: int = len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] = {b: str(_UpperCAmelCase ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )}
SCREAMING_SNAKE_CASE_: List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE_: List[str] = block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
SCREAMING_SNAKE_CASE_: Optional[Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE_: str = "efficientnet." + item[1]
SCREAMING_SNAKE_CASE_: List[str] = "classifier.weight"
SCREAMING_SNAKE_CASE_: Optional[Any] = "classifier.bias"
return key_mapping
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE_: List[str] = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE_: str = torch.from_numpy(_UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE_: Tuple = torch.from_numpy(np.transpose(_UpperCAmelCase ) )
else:
SCREAMING_SNAKE_CASE_: List[str] = torch.from_numpy(_UpperCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_UpperCAmelCase )
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = model_classes[model_name](
include_top=_UpperCAmelCase , weights="imagenet" , input_tensor=_UpperCAmelCase , input_shape=_UpperCAmelCase , pooling=_UpperCAmelCase , classes=10_00 , classifier_activation="softmax" , )
SCREAMING_SNAKE_CASE_: Tuple = original_model.trainable_variables
SCREAMING_SNAKE_CASE_: Dict = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE_: List[Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE_: str = param.numpy()
SCREAMING_SNAKE_CASE_: Union[str, Any] = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE_: Any = get_efficientnet_config(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = EfficientNetForImageClassification(_UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE_: str = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
SCREAMING_SNAKE_CASE_: Tuple = rename_keys(_UpperCAmelCase )
replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE_: Optional[Any] = convert_image_processor(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] = hf_model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Any = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = CONFIG_MAP[model_name]["image_size"]
SCREAMING_SNAKE_CASE_: int = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE_: Tuple = image.img_to_array(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = np.expand_dims(_UpperCAmelCase , axis=0 )
SCREAMING_SNAKE_CASE_: str = original_model.predict(_UpperCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_UpperCAmelCase ):
os.mkdir(_UpperCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(_UpperCAmelCase )
preprocessor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(f"Pushing converted {model_name} to the hub..." )
SCREAMING_SNAKE_CASE_: Optional[Any] = f"efficientnet-{model_name}"
preprocessor.push_to_hub(_UpperCAmelCase )
hf_model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowerCAmelCase : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 127 | 1 |
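The heart of `replace_params` above is the kernel layout change between the two frameworks. A small self-contained sketch of the two permutations it applies, with arbitrary shapes (assumes only numpy and torch):

import numpy as np
import torch

# Standard conv kernels: TF stores (H, W, C_in, C_out); PyTorch expects (C_out, C_in, H, W).
tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)

# Depthwise conv kernels: TF stores (H, W, C_in, multiplier); the script permutes with (2, 3, 0, 1).
tf_dw_kernel = np.random.rand(3, 3, 16, 1).astype(np.float32)
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert pt_dw_kernel.shape == (16, 1, 3, 3)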
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: float , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: bool = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = False
lowercase__ = nn.Dropout(p=UpperCamelCase_ )
lowercase__ = TaConfig(
vocab_size=UpperCamelCase_ , d_model=UpperCamelCase_ , num_heads=UpperCamelCase_ , d_kv=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , feed_forward_proj=UpperCamelCase_ , is_decoder=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , )
lowercase__ = nn.ModuleList()
for lyr_num in range(UpperCamelCase_ ):
lowercase__ = TaBlock(UpperCamelCase_ )
self.encoders.append(UpperCamelCase_ )
lowercase__ = TaLayerNorm(UpperCamelCase_ )
lowercase__ = nn.Dropout(p=UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.token_embedder(UpperCamelCase_ )
lowercase__ = encoder_input_tokens.shape[1]
lowercase__ = torch.arange(UpperCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase_ )
lowercase__ = self.dropout_pre(UpperCamelCase_ )
        # invert the attention mask
lowercase__ = encoder_input_tokens.size()
lowercase__ = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ )
for lyr in self.encoders:
lowercase__ = lyr(UpperCamelCase_ , UpperCamelCase_ )[0]
lowercase__ = self.layer_norm(UpperCamelCase_ )
return self.dropout_post(UpperCamelCase_ ), encoder_inputs_mask
| 110 |
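A minimal sketch of what `get_extended_attention_mask` (inherited from `ModuleUtilsMixin`) produces for this encoder: the (batch, seq) padding mask is broadcast to (batch, 1, 1, seq) and turned into an additive bias. This is a simplified re-implementation for illustration, not the library code itself:

import torch

def extended_attention_mask(mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # 1 -> keep (bias 0); 0 -> mask (large negative bias added to the attention scores)
    ext = mask[:, None, None, :].to(dtype)
    return (1.0 - ext) * torch.finfo(dtype).min

padding_mask = torch.tensor([[1, 1, 1, 0]])
print(extended_attention_mask(padding_mask).shape)  # torch.Size([1, 1, 1, 4])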
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as flax_state_f:
lowercase__ = from_bytes(SCREAMING_SNAKE_CASE , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    lowercase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
        lowercase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
lowercase__ = ''''''
lowercase__ = flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
lowercase__ = pt_model.state_dict()
# keep track of unexpected & missing keys
lowercase__ = []
lowercase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__ = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']
lowercase__ = jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']
lowercase__ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowercase__ = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
lowercase__ = '''.'''.join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
lowercase__ = np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
lowercase__ = torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
lowercase__ = list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
| 110 | 1 |
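Besides the 4-D conv case, the most common rule in the loader above is the plain transpose for dense layers: a Flax `kernel` is stored as (in_features, out_features), while a PyTorch `weight` is (out_features, in_features). A tiny sketch with arbitrary sizes:

import numpy as np
import torch

flax_kernel = np.random.rand(8, 4).astype(np.float32)    # (in_features, out_features)
pt_weight = torch.from_numpy(np.transpose(flax_kernel))  # (out_features, in_features)
assert pt_weight.shape == (4, 8)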
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any ):
"""simple docstring"""
UpperCamelCase = {}
def A ( self : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=1 ):
"""simple docstring"""
if self.graph.get(UpperCamelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
UpperCamelCase = [[w, v]]
if not self.graph.get(UpperCamelCase__ ):
UpperCamelCase = []
def A ( self : Optional[int] ):
"""simple docstring"""
return list(self.graph )
def A ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Any=-2 , UpperCamelCase__ : Tuple=-1 ):
"""simple docstring"""
if s == d:
return []
UpperCamelCase = []
UpperCamelCase = []
if s == -2:
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def A ( self : List[Any] , UpperCamelCase__ : Any=-1 ):
"""simple docstring"""
if c == -1:
UpperCamelCase = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
UpperCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[Any]=-2 ):
"""simple docstring"""
UpperCamelCase = deque()
UpperCamelCase = []
if s == -2:
UpperCamelCase = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
UpperCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def A ( self : Union[str, Any] , UpperCamelCase__ : int ):
"""simple docstring"""
return len(self.graph[u] )
def A ( self : Tuple , UpperCamelCase__ : Tuple=-2 ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
if s == -2:
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = s
UpperCamelCase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return sorted_nodes
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = -2
UpperCamelCase = []
UpperCamelCase = s
UpperCamelCase = False
UpperCamelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase = True
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = False
indirect_parents.append(UpperCamelCase__ )
UpperCamelCase = s
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = -2
UpperCamelCase = []
UpperCamelCase = s
UpperCamelCase = False
UpperCamelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase = True
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = False
indirect_parents.append(UpperCamelCase__ )
UpperCamelCase = s
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any]=-2 , UpperCamelCase__ : Union[str, Any]=-1 ):
"""simple docstring"""
UpperCamelCase = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = time()
return end - begin
def A ( self : Dict , UpperCamelCase__ : int=-2 ):
"""simple docstring"""
UpperCamelCase = time()
self.bfs(UpperCamelCase__ )
UpperCamelCase = time()
return end - begin
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : int ):
"""simple docstring"""
UpperCamelCase = {}
def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=1 ):
"""simple docstring"""
if self.graph.get(UpperCamelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
UpperCamelCase = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
UpperCamelCase = [[w, u]]
def A ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
"""simple docstring"""
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
# the other way round
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__ )
def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=-2 , UpperCamelCase__ : Tuple=-1 ):
"""simple docstring"""
if s == d:
return []
UpperCamelCase = []
UpperCamelCase = []
if s == -2:
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def A ( self : Optional[Any] , UpperCamelCase__ : int=-1 ):
"""simple docstring"""
if c == -1:
UpperCamelCase = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
UpperCamelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def A ( self : List[Any] , UpperCamelCase__ : Union[str, Any]=-2 ):
"""simple docstring"""
UpperCamelCase = deque()
UpperCamelCase = []
if s == -2:
UpperCamelCase = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
UpperCamelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def A ( self : Any , UpperCamelCase__ : Any ):
"""simple docstring"""
return len(self.graph[u] )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = -2
UpperCamelCase = []
UpperCamelCase = s
UpperCamelCase = False
UpperCamelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase = True
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = False
indirect_parents.append(UpperCamelCase__ )
UpperCamelCase = s
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
UpperCamelCase = -2
UpperCamelCase = []
UpperCamelCase = s
UpperCamelCase = False
UpperCamelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
UpperCamelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
UpperCamelCase = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
UpperCamelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCamelCase = True
if len(UpperCamelCase__ ) != 0:
UpperCamelCase = stack[len(UpperCamelCase__ ) - 1]
else:
UpperCamelCase = False
indirect_parents.append(UpperCamelCase__ )
UpperCamelCase = s
UpperCamelCase = ss
            # check if we have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def A ( self : Union[str, Any] ):
"""simple docstring"""
return list(self.graph )
def A ( self : Tuple , UpperCamelCase__ : Dict=-2 , UpperCamelCase__ : str=-1 ):
"""simple docstring"""
UpperCamelCase = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = time()
return end - begin
def A ( self : Union[str, Any] , UpperCamelCase__ : Optional[int]=-2 ):
"""simple docstring"""
UpperCamelCase = time()
self.bfs(UpperCamelCase__ )
UpperCamelCase = time()
return end - begin
| 249 |
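The two classes above implement iterative DFS and BFS with an explicit stack and deque. For reference, a compact standalone sketch of the same traversals on a plain adjacency list (unweighted; the sample graph is hypothetical):

from collections import deque

graph = {0: [1, 2], 1: [3], 2: [3], 3: []}

def dfs(start):
    stack, visited = [start], [start]
    while stack:
        node = stack.pop()
        for nxt in graph[node]:
            if nxt not in visited:
                visited.append(nxt)
                stack.append(nxt)
    return visited

def bfs(start):
    queue, visited = deque([start]), [start]
    while queue:
        node = queue.popleft()
        for nxt in graph[node]:
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    return visited

print(dfs(0))  # e.g. [0, 1, 2, 3]
print(bfs(0))  # [0, 1, 2, 3]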
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
# Load configuration defined in the metadata file
with open(A__ ) as metadata_file:
UpperCamelCase = json.load(A__ )
UpperCamelCase = LukeConfig(use_entity_aware_attention=A__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
UpperCamelCase = torch.load(A__ , map_location='cpu' )['module']
# Load the entity vocab file
UpperCamelCase = load_original_entity_vocab(A__ )
# add an entry for [MASK2]
UpperCamelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase = AddedToken('<ent>' , lstrip=A__ , rstrip=A__ )
UpperCamelCase = AddedToken('<ent2>' , lstrip=A__ , rstrip=A__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(A__ )
with open(os.path.join(A__ , 'tokenizer_config.json' ) , 'r' ) as f:
UpperCamelCase = json.load(A__ )
UpperCamelCase = 'MLukeTokenizer'
with open(os.path.join(A__ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(A__ , A__ )
with open(os.path.join(A__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(A__ , A__ )
UpperCamelCase = MLukeTokenizer.from_pretrained(A__ )
# Initialize the embeddings of the special tokens
UpperCamelCase = tokenizer.convert_tokens_to_ids(['@'] )[0]
UpperCamelCase = tokenizer.convert_tokens_to_ids(['#'] )[0]
UpperCamelCase = state_dict['embeddings.word_embeddings.weight']
UpperCamelCase = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase = state_dict[bias_name]
UpperCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase = F"""encoder.layer.{layer_index}.attention.self."""
UpperCamelCase = state_dict[prefix + matrix_name]
UpperCamelCase = state_dict[prefix + matrix_name]
UpperCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight']
UpperCamelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
UpperCamelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase = state_dict['entity_predictions.bias']
UpperCamelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
UpperCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase = LukeForMaskedLM(config=A__ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            # weights outside the two heads live under the ``luke.`` prefix in the HF model,
            # as the ``luke.embeddings.position_ids`` check below implies
            new_state_dict['luke.' + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    UpperCamelCase , UpperCamelCase = model.load_state_dict(new_state_dict , strict=A__ )
if set(A__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(A__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase = MLukeTokenizer.from_pretrained(A__ , task='entity_classification' )
UpperCamelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
UpperCamelCase = (0, 9)
UpperCamelCase = tokenizer(A__ , entity_spans=[span] , return_tensors='pt' )
UpperCamelCase = model(**A__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase = torch.Size((1, 33, 768) )
UpperCamelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase = torch.Size((1, 1, 768) )
UpperCamelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase = MLukeTokenizer.from_pretrained(A__ )
UpperCamelCase = 'Tokyo is the capital of <mask>.'
UpperCamelCase = (24, 30)
UpperCamelCase = tokenizer(A__ , entity_spans=[span] , return_tensors='pt' )
UpperCamelCase = model(**A__ )
UpperCamelCase = encoding['input_ids'][0].tolist()
UpperCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
UpperCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(A__ )
UpperCamelCase = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(A__ ) )
model.save_pretrained(A__ )
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase = ['[MASK]', '[PAD]', '[UNK]']
UpperCamelCase = [json.loads(A__ ) for line in open(A__ )]
UpperCamelCase = {}
for entry in data:
UpperCamelCase = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase = entity_id
break
UpperCamelCase = F"""{language}:{entity_name}"""
UpperCamelCase = entity_id
return new_mapping
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 249 | 1 |
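The conversion above grows the word and entity embedding matrices by concatenating rows for the newly added special tokens. A minimal sketch of that pattern with hypothetical sizes and seed indices:

import torch

word_emb = torch.randn(10, 4)               # (vocab_size, hidden_size)
ent_row = word_emb[3].unsqueeze(0)          # seed <ent> from an existing token's row
enta_row = word_emb[5].unsqueeze(0)         # seed <ent2> likewise
word_emb = torch.cat([word_emb, ent_row, enta_row])
assert word_emb.shape == (12, 4)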
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
@staticmethod
def __a ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
pass
@is_pipeline_test
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@require_torch
def __a ( self ) -> Dict:
a : List[Any] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : List[str] = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] )
        # The floating-point scores are so close that we hit floating-point error, so the ordering is
        # not guaranteed across Python and Torch versions.
self.assertIn(
nested_simplify(lowerCAmelCase__ ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
] , )
@require_tf
def __a ( self ) -> int:
a : Tuple = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
a : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : int = image_classifier(lowerCAmelCase__ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
a : List[str] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
[
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
{"score": 0.333, "label": ANY(lowerCAmelCase__ )},
],
] , )
@slow
@require_torch
def __a ( self ) -> Union[str, Any]:
a : Optional[int] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
a : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : str = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a : List[str] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __a ( self ) -> Optional[Any]:
a : List[str] = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[Any] = image_classifier(lowerCAmelCase__ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a : Optional[int] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 105 |
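For reference, a minimal end-to-end use of the pipeline these tests exercise. It downloads the CLIP checkpoint, so it needs network access, and the exact scores will vary:

from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "plane", "remote"],
)
print(predictions)  # list of {"score": ..., "label": ...} dicts sorted by score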
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    '''Return the minimum top-left to bottom-right path sum, moving only right or down.'''
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information" )

    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list , row_above: list ) -> list:
    '''Accumulate the minimum reachable sum into each cell of ``current_row``.'''
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 | 1 |
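A worked example for `min_path_sum` above: in the grid below the cheapest top-left to bottom-right path, moving only right or down, is 1 → 3 → 1 → 1 → 1 = 7.

print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7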
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 352 |
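A hypothetical usage sketch of the re-exported pipeline; the checkpoint id `microsoft/vq-diffusion-ithq` is taken from the diffusers documentation, both torch and transformers must be installed per the availability guard above, and a CUDA device is assumed:

import torch
from diffusers import VQDiffusionPipeline

# Checkpoint id assumed from the diffusers docs; substitute your own if needed.
pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("teddy bear playing in the pool").images[0]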
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_uncond_unet
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 167 | 0 |
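The scheduler under test follows the Karras et al. (2022) sigma schedule. A short sketch of the defining formula, written as a simplified re-derivation rather than the library's exact code (the default constants are assumptions):

import numpy as np

def karras_sigmas(n: int, sigma_min: float = 0.02, sigma_max: float = 100.0, rho: float = 7.0) -> np.ndarray:
    # Interpolate in sigma**(1/rho) space, then raise back to the rho-th power.
    ramp = np.linspace(0, 1, n)
    max_inv = sigma_max ** (1 / rho)
    min_inv = sigma_min ** (1 / rho)
    return (max_inv + ramp * (min_inv - max_inv)) ** rho

print(karras_sigmas(5))  # descending sigmas from sigma_max down to sigma_min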