| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    # Identifier names are reconstructions of the obfuscated originals.
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
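# A minimal usage sketch, assuming the restored rank_of_matrix above; the matrix
# literal is illustrative and not part of the original sample.
example_matrix = [
    [1.0, 2.0, 3.0],
    [2.0, 4.0, 6.0],  # twice the first row
    [0.0, 1.0, 1.0],
]
print(rank_of_matrix(example_matrix))  # prints 2: only two rows are independent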
code_codestyle: 40
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lz4.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
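# A read-back sketch for the SQLite fixture above, assuming the `dataset` table it
# creates; `read_sqlite_rows` is a hypothetical helper, not part of the original file.
def read_sqlite_rows(db_path):
    con = sqlite3.connect(db_path)
    try:
        return con.execute('SELECT col_1, col_2, col_3 FROM dataset').fetchall()
    finally:
        con.close()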
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.int64(),
'col_3': pa.float64(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
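# A verification sketch for the Parquet fixture above; `check_parquet_file` is a
# hypothetical helper, and the expectations mirror the DATA constant.
def check_parquet_file(path):
    table = pq.read_table(path)
    assert table.num_rows == len(DATA)
    assert table.column_names == ['col_1', 'col_2', 'col_3']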
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
style_context_codestyle: 56, label: 0
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(repo_id, filename):
    # Parameter names here are reconstructions; the originals were obfuscated.
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=repo_id, filename=filename, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    # Identifier names are reconstructions of the obfuscated originals.
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    # Identifier names are reconstructions of the obfuscated originals.
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3  # name is a reconstruction
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    # Parameter names are reconstructions of the obfuscated originals.
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semantic_config = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarse_config = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fine_config = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codec_config = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semantic_config, coarse_config, fine_config, codec_config
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
lowerCAmelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
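# A hedged sketch of driving the converter programmatically instead of via the CLI;
# the output folder below is illustrative, not from the original script. Left
# commented out because it downloads checkpoints when run.
# load_model("./bark_text_small", model_type="text", use_small=True)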
code_codestyle: 41
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    # Identifier names are reconstructions of the obfuscated originals.
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
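# A hedged smoke test of the restored classes above; values are illustrative.
config = CamembertConfig(vocab_size=30522)
print(config.model_type)  # "camembert"
onnx_config = CamembertOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs)  # OrderedDict with batch/choice/sequence axes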
style_context_codestyle: 56, label: 0
'''simple docstring'''
def solution(n: int = 1_000_000) -> int:
    # Identifier names are reconstructions of the obfuscated originals.
    # Finds the start value below `n` with the longest Collatz chain,
    # memoizing chain lengths in `counters`.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
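# A worked example, assuming the solution above: the chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. 10 terms, so the
# memoized counters[13] would be 10.
chain = [13]
while chain[-1] != 1:
    chain.append(chain[-1] // 2 if chain[-1] % 2 == 0 else 3 * chain[-1] + 1)
print(len(chain))  # 10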
code_codestyle: 42
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    # Identifier names are reconstructions of the obfuscated originals.
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
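# A hedged smoke test of the restored config above; values are illustrative.
config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
print(config.model_type)  # "timesformer"
print(config.num_frames)  # 16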
style_context_codestyle: 56, label: 0
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^(-x)) of `vector`."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
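# A minimal usage sketch, assuming the sigmoid above; the input vector is illustrative.
print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # [0.26894142 0.5        0.73105858]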
code_codestyle: 43
'''simple docstring'''
class Node:
    # Identifier names are reconstructions of the obfuscated originals.
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
style_context_codestyle: 56, label: 0
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase_ : int = False
class UpperCAmelCase__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[Any] ):
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion",torch_dtype=torch.float16 )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = pipe.dual_guided(
prompt="first prompt",image=__A,text_to_image_strength=0.75,generator=__A,guidance_scale=7.5,num_inference_steps=2,output_type="numpy",).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
pipe = VersatileDiffusionPipeline.from_pretrained(__A,torch_dtype=torch.float16 )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Tuple = generator.manual_seed(0 )
_lowerCamelCase : Optional[Any] = pipe.dual_guided(
prompt="first prompt",image=__A,text_to_image_strength=0.75,generator=__A,guidance_scale=7.5,num_inference_steps=2,output_type="numpy",).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion",torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Tuple = "cyberpunk 2077"
_lowerCamelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
_lowerCamelCase : Tuple = torch.manual_seed(0 )
_lowerCamelCase : Tuple = pipe.dual_guided(
prompt=__A,image=__A,text_to_image_strength=0.75,generator=__A,guidance_scale=7.5,num_inference_steps=5_0,output_type="numpy",).images
_lowerCamelCase : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCamelCase : List[str] = "A painting of a squirrel eating a burger "
_lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = pipe.text_to_image(
prompt=__A,generator=__A,guidance_scale=7.5,num_inference_steps=5_0,output_type="numpy" ).images
_lowerCamelCase : Optional[int] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Tuple = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_lowerCamelCase : int = pipe.image_variation(__A,generator=__A,output_type="numpy" ).images
_lowerCamelCase : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowerCamelCase : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
code_codestyle: 44
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 56, label: 0
import os
from distutils.util import strtobool


# Function names below are reconstructions of the obfuscated originals.
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
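# A hedged usage sketch; the environment variables below are illustrative, and the
# function names above are reconstructions of the obfuscated originals.
os.environ["MY_FLAG"] = "1"
os.environ["WORLD_SIZE"] = "4"
print(get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1))  # 4
print(parse_flag_from_env("MY_FLAG"))                       # True
print(parse_choice_from_env("MY_MODE", default="no"))       # "no"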
code_codestyle: 45
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.float16 if fpaa else torch.float32
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.float16 if fpaa else torch.float32
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
style_context_codestyle: 56, label: 0
"""simple docstring"""
def is_palindrome(head) -> bool:
    # Identifier names are reconstructions of the obfuscated originals.
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
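# A minimal node type and list builder, assuming the `val`/`next` shape the
# functions above rely on; the sample itself never defines one, so this is a sketch.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


print(is_palindrome_stack(build_list([1, 2, 2, 1])))  # True
print(is_palindrome_dict(build_list([1, 2, 3])))      # False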
code_codestyle: 46
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # Class and attribute names are reconstructions of the obfuscated originals.
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
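# Usage sketch (the test path is illustrative). @slow tests are skipped unless the
# RUN_SLOW environment variable is set, so a default run only exercises the
# tiny-random-weight tests above, while the class above this comment downloads
# the real 'openai/shap-e' checkpoint and needs a GPU:
#
#   RUN_SLOW=1 pytest tests/pipelines/shap_e/test_shap_e.py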
| 56 | 0 |
def actual_power(a: int, b: int) -> int:
    """Compute a ** b for b >= 0 by recursively splitting the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so the recursion on a negative b
        # mirrors the positive case and actual_power(a, b) equals a ** abs(b).
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
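# A small sanity sketch of the recursion: power(2, 10) evaluates as
# actual_power(2, 5) * actual_power(2, 5), and so on down to b == 0.
assert power(2, 10) == 1024
assert power(2, -3) == 0.125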
| 47 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of the prime partitions of number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first value that can be written as a sum of primes in more
    than number_unique_partitions different ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
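# By unique prime factorization, each multiset of primes summing to n maps to
# exactly one product, so len(partition(n)) counts the distinct prime partitions
# of n. A small sketch (assuming the definitions above):
assert partition(7) == {7, 10, 12}  # 7 = 7 = 5 + 2 = 3 + 2 + 2
assert solution(4) == 10  # 10 is the first value with more than 4 prime partitions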
| 56 | 0 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ : str = logging.getLogger(__name__)
UpperCAmelCase__ : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A :
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(SCREAMING_SNAKE_CASE__ )} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
snake_case__ :bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
snake_case__ :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
snake_case__ :bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class A :
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
snake_case__ :Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The input training data file (a text file).'} )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
snake_case__ :Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
snake_case__ :bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
snake_case__ :Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
snake_case__ :Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
snake_case__ :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
snake_case__ :bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
if self.train_file is not None:
lowerCAmelCase__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowerCAmelCase__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach the word-segmentation reference column used for whole word masking."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def A ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
lowerCAmelCase__ = {}
if data_args.train_file is not None:
lowerCAmelCase__ = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase__ = data_args.validation_file
lowerCAmelCase__ = data_args.train_file.split("." )[-1]
if extension == "txt":
lowerCAmelCase__ = "text"
lowerCAmelCase__ = load_dataset(UpperCamelCase_ , data_files=UpperCamelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
lowerCAmelCase__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
lowerCAmelCase__ = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase_ )
elif model_args.model_name_or_path:
lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
lowerCAmelCase__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
lowerCAmelCase__ = AutoModelForMaskedLM.from_config(UpperCamelCase_ )
model.resize_token_embeddings(len(UpperCamelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCAmelCase__ = datasets["train"].column_names
else:
lowerCAmelCase__ = datasets["validation"].column_names
lowerCAmelCase__ = "text" if "text" in column_names else column_names[0]
lowerCAmelCase__ = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(UpperCamelCase_ : int ):
# Remove empty lines
lowerCAmelCase__ = [line for line in examples["text"] if len(UpperCamelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=data_args.max_seq_length )
lowerCAmelCase__ = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCAmelCase__ = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCAmelCase__ = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowerCAmelCase__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCAmelCase__ = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCAmelCase__ = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase__ = model_args.model_name_or_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase__ = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
lowerCAmelCase__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCAmelCase__ = trainer.evaluate()
lowerCAmelCase__ = math.exp(eval_output["eval_loss"] )
lowerCAmelCase__ = perplexity
lowerCAmelCase__ = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
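# A hypothetical invocation sketch (file names and hyper-parameters are
# illustrative, not from the original script):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.json \
#       --do_train \
#       --output_dir ./output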
| 48 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(cache_dir ):
    os.makedirs(cache_dir )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
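# Usage sketch: this module backs the `accelerate config` subcommand, so the
# questionnaire is normally reached through the CLI rather than by importing it:
#
#   accelerate config                          # interactive prompts
#   accelerate config --config_file my.yaml    # write to an explicit path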
| 56 | 0 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of number."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and declaring its member 58 first gives the least number of
# iterations for all the members to be checked;
# the other ends with 1 and has only one element, 1.
# So 58 and 1 are declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Return True if the chain of number arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # appending zeros leaves the digit-square sum unchanged, so those numbers
    # share the same chain tail and can be filled in for free
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
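# A short sketch of the two chain endings (assuming the definitions above):
# 44 -> 32 -> 13 -> 10 -> 1 and 85 -> 89 -> 145 -> ... -> 89.
assert next_number(44) == 32
assert chain(44) is True  # arrives at 1
assert chain(85) is False  # arrives at 89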
| 49 |
'''simple docstring'''
from __future__ import annotations
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return a list of the first n odd composite numbers that cannot be
    written as the sum of a prime and twice a square."""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that cannot be written as the sum of
    a prime and twice a square (Goldbach's other conjecture)."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
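# Worked example: 9 = 7 + 2 * 1**2 and 15 = 7 + 2 * 2**2 satisfy the conjecture,
# and (assuming the definitions above) the smallest counterexample is:
assert solution() == 5777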
| 56 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
        """-DCUDA_HAS_FP16=1""",
        """-D__CUDA_NO_HALF_OPERATORS__""",
        """-D__CUDA_NO_HALF_CONVERSIONS__""",
        """-D__CUDA_NO_HALF2_OPERATORS__""",
        ] , )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
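# Hypothetical usage sketch (requires a CUDA toolchain; torch.utils.cpp_extension
# compiles the kernels on the first call and caches the build):
#
#   MSDA = load_cuda_kernels()
#   # the attention layer can then call MSDA.ms_deform_attn_forward(...)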
| 50 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of partitions and return
    the byte ranges as 'start-end' strings; the last partition absorbs any
    remainder."""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
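# Example: split a 100-byte payload across 3 partitions; the last partition
# absorbs the remainder.
assert allocation_num(100, 3) == ['1-33', '34-66', '67-100']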
| 56 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
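# Usage sketch (assuming torch is installed, so the `else` branch above has
# registered the modeling symbols); the lazy module defers the heavy import
# until first attribute access:
#
#   from transformers import AutoformerConfig, AutoformerModel
#   model = AutoformerModel(AutoformerConfig())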
| 51 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _lowercase ( __lowercase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0_1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1000 ) -> Tuple:
__snake_case = p_stop
__snake_case = max_length
def __iter__( self : Any ) -> Union[str, Any]:
__snake_case = 0
__snake_case = False
while not stop and count < self.max_length:
yield count
count += 1
__snake_case = random.random() < self.p_stop
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]:
__snake_case = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
__snake_case = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but the
# number of batches is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and the
# number of batches is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but the
# number of batches is a multiple of num_processes.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and the
# number of batches is not a multiple of num_processes.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
__snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case = [BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
random.seed(SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
__snake_case = [
IterableDatasetShard(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ , num_processes=SCREAMING_SNAKE_CASE_ , process_index=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , )
for i in range(SCREAMING_SNAKE_CASE_ )
]
__snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(SCREAMING_SNAKE_CASE_ )
iterable_dataset_lists.append(list(SCREAMING_SNAKE_CASE_ ) )
__snake_case = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) % shard_batch_size == 0 )
__snake_case = []
for idx in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(SCREAMING_SNAKE_CASE_ ) < len(SCREAMING_SNAKE_CASE_ ):
reference += reference
self.assertListEqual(SCREAMING_SNAKE_CASE_ , reference[: len(SCREAMING_SNAKE_CASE_ )] )
def a ( self : Dict ) -> Tuple:
__snake_case = 42
__snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Edge case with a very small dataset
__snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
self.check_iterable_dataset_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> str:
__snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = SkipBatchSampler(SCREAMING_SNAKE_CASE_ , 2 )
self.assertListEqual(list(SCREAMING_SNAKE_CASE_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
__snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
__snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case = skip_first_batches(SCREAMING_SNAKE_CASE_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
__snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
__snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(SCREAMING_SNAKE_CASE_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
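# A quick illustration of the sharding semantics exercised above (a sketch, not
# part of the original test file): two shards interleave whole batches.
#
#   sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
#   shard0 = BatchSamplerShard(sampler, num_processes=2, process_index=0)
#   list(shard0)  # [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]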
| 56 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
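# Note on the (width, height) -> (height, width) conversion tested above: older
# configs stored size as a (width, height) tuple, and the processor normalizes
# it to the dict form. A usage sketch:
#
#   processor = DonutImageProcessor(size=(42, 84))
#   processor.size  # {'height': 84, 'width': 42}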
| 52 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = MarianTokenizer
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name='Helsinki-NLP/opus-mt-en-de', revision='1a8c2263da11e68e50938f97e10cd57820bd504c', decode_kwargs={'use_source_tokenizer': True}, )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
        source_text = 'Tämä on testi'
        target_text = 'This is a test'
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
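    # Usage sketch (hedged): multilingual Marian checkpoints expect a
    # target-language token such as the `zh_code` constant above prepended to
    # the source text, e.g.
    #
    #   multi_tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ROMANCE")  # model id illustrative
    #   multi_tok(">>fr<< this is a sentence in english that we want to translate")
    #
    # The exact set of supported ">>xx<<" codes depends on the checkpoint.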
| 56 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
cluster = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
cluster = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
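# Invocation sketch (script name and host values are illustrative, not taken
# from this file):
#
#   python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#       pytorch/text-generation/run_generation.py --model_type=gpt2 --model_name_or_path=gpt2
#
# Anything after the example path is forwarded verbatim to the remote script
# via the `unknown` args captured by parse_known_args above.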
| 53 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """simple docstring"""
    if len(string_aa) != 3_2:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:]) + to_little_endian(start_len[:3_2])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 5_1_2):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0, 5_1_2, 3_2):
            block_words.append(int(to_little_endian(block[i : i + 3_2]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)
    added_consts = [int(2**3_2 * abs(sin(i + 1))) for i in range(6_4)]
    # Starting states
    aa = 0x6_7_4_5_2_3_0_1
    ba = 0xE_F_C_D_A_B_8_9
    ca = 0x9_8_B_A_D_C_F_E
    da = 0x1_0_3_2_5_4_7_6
    shift_amounts = [
        7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
        5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
        4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
        6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(6_4):
            if i <= 1_5:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 3_1:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 1_6
            elif i <= 4_7:
                f = b ^ c ^ d
                g = (3 * i + 5) % 1_6
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 1_6
            f = (f + a + added_consts[i] + block_words[g]) % 2**3_2
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
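# Sanity-check sketch: the pure-Python digest above should agree with hashlib
# for any input; the hex below is the well-known MD5 of b"hello".
#
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")
#   assert hashlib.md5(b"hello").hexdigest() == "5d41402abc4b2a76b9719d911017c592"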
| 56 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'')
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'{file_idx}_{batch_idx}', self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f'Failed to read file \'{file}\' with error {type(e)}: {e}')
                    raise
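# Usage sketch: this builder backs the packaged "parquet" loader, so the usual
# entry point is `load_dataset` rather than the class itself (paths illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})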
| 54 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
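# Example sketch (repo id and path are illustrative):
#
#   hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
#   # -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv"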
| 56 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
"""simple docstring"""
    iam_client = botoa.client("iam" )
    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    """simple docstring"""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    """simple docstring"""
__A = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , a_ , )
__A = None
if credentials_configuration == 0:
__A = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
__A = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
__A = _ask_field("AWS Access Key ID: " )
__A = aws_access_key_id
__A = _ask_field("AWS Secret Access Key: " )
__A = aws_secret_access_key
__A = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
__A = aws_region
__A = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , a_ , )
if role_management == 0:
__A = _ask_field("Enter your IAM role name: " )
else:
__A = "accelerate_sagemaker_execution_role"
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(a_ )
__A = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
__A = None
if is_custom_docker_image:
__A = _ask_field("Enter your Docker image: " , lambda a_ : str(a_ ).lower() )
__A = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
__A = None
if is_sagemaker_inputs_enabled:
__A = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda a_ : str(a_ ).lower() , )
__A = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
__A = None
if is_sagemaker_metrics_enabled:
__A = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda a_ : str(a_ ).lower() , )
__A = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
__A = {}
__A = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_dynamo:
__A = "dynamo_"
__A = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__A = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_custom_options:
__A = _ask_options(
"Which mode do you want to use?" , a_ , lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] , default="default" , )
__A = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
__A = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
__A = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
__A = _ask_options(
a_ , a_ , lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__A = _ask_field(a_ , lambda a_ : str(a_ ).lower() , default="ml.p3.2xlarge" )
__A = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__A = _ask_field(
"How many machines do you want to use? [1]: " , int , default=1 , )
__A = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=a_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=a_ , use_cpu=a_ , dynamo_config=a_ , eca_instance_type=a_ , profile=a_ , region=a_ , iam_role_name=a_ , mixed_precision=a_ , num_machines=a_ , sagemaker_inputs_file=a_ , sagemaker_metrics_file=a_ , )
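# Usage sketch: this prompt flow is what backs `accelerate config` when the
# Amazon SageMaker compute environment is selected; the returned SageMakerConfig
# is then serialized to the default accelerate config file.
#
#   $ accelerate config   # choose the Amazon SageMaker environment when prompted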
| 55 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used only for testing purposes."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
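# Usage sketch (hypothetical shapes): wrap a frozen linear layer so that only
# the low-rank adapter receives gradients, as the training test below does.
#
#   base = nn.Linear(768, 768)
#   base.weight.requires_grad_(False)
#   lora = LoRALayer(base, rank=16)   # forward(x) = base(x) + adapter(x)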
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside the setUp function
    # We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 1_0
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map='auto')
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map='auto')
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class Classes4BitModelTest(Base4bitTest):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class Bnb4bitTestTraining(Base4bitTest):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
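# Loading sketch (mirrors the setUp above; `load_in_abit` is this file's alias
# for bitsandbytes 4-bit loading, model id taken from the base test class):
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_abit=True, device_map="auto"
#   )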
| 56 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : Dict = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
A_ : Optional[Any] = {'allegro/herbert-base-cased': 514}
A_ : Dict = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
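    # Special-token layout sketch, as built by the methods above:
    #   single sequence: <s> A </s>
    #   pair:            <s> A </s> B </s>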
| 57 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
    def setUp(self):
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_6000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(feature_extractor_map) + '\n')
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# reload the processor with additional decoder hyperparameters passed as kwargs
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
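The timestamp computation above reduces to a single scaling factor: seconds of audio per logit frame. A minimal standalone sketch of that conversion (the helper name offsets_to_timestamps and all numeric values are illustrative, not taken from the test):

def offsets_to_timestamps(word_offsets, inputs_to_logits_ratio, sampling_rate):
    # seconds of audio represented by one logit frame
    time_per_frame = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_per_frame,
            "end_time": d["end_offset"] * time_per_frame,
        }
        for d in word_offsets
    ]

example = [{"word": "HELLO", "start_offset": 12, "end_offset": 20}]
print(offsets_to_timestamps(example, inputs_to_logits_ratio=320, sampling_rate=16_000))
# [{'word': 'HELLO', 'start_time': 0.24, 'end_time': 0.4}]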
| 56 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Dict = tau * frequency / samplerate
snake_case_ : List[Any] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : List[Any] = (1 - _cos) / 2
snake_case_ : Any = 1 - _cos
snake_case_ : Union[str, Any] = 1 + alpha
snake_case_ : Tuple = -2 * _cos
snake_case_ : str = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Optional[Any] = tau * frequency / samplerate
snake_case_ : Dict = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : Optional[int] = _sin / (2 * q_factor)
snake_case_ : List[Any] = (1 + _cos) / 2
snake_case_ : Union[str, Any] = -1 - _cos
snake_case_ : Any = 1 + alpha
snake_case_ : Any = -2 * _cos
snake_case_ : List[Any] = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tau * frequency / samplerate
snake_case_ : Optional[int] = sin(__UpperCamelCase )
snake_case_ : Dict = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : str = _sin / 2
snake_case_ : Any = 0
snake_case_ : List[Any] = -ba
snake_case_ : List[Any] = 1 + alpha
snake_case_ : List[str] = -2 * _cos
snake_case_ : Optional[int] = 1 - alpha
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Tuple = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : List[str] = _sin / (2 * q_factor)
snake_case_ : str = 1 - alpha
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : int = 1 + alpha
snake_case_ : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : str = tau * frequency / samplerate
snake_case_ : str = sin(__UpperCamelCase )
snake_case_ : Union[str, Any] = cos(__UpperCamelCase )
snake_case_ : List[str] = _sin / (2 * q_factor)
snake_case_ : Tuple = 1_0 ** (gain_db / 4_0)
snake_case_ : List[Any] = 1 + alpha * big_a
snake_case_ : Optional[int] = -2 * _cos
snake_case_ : Dict = 1 - alpha * big_a
snake_case_ : List[Any] = 1 + alpha / big_a
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : Any = 1 - alpha / big_a
snake_case_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Optional[int] = tau * frequency / samplerate
snake_case_ : Optional[int] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : Optional[Any] = _sin / (2 * q_factor)
snake_case_ : Tuple = 1_0 ** (gain_db / 4_0)
snake_case_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : List[str] = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : List[Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : Optional[Any] = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[int] = big_a * (pmc + aaa)
snake_case_ : int = 2 * big_a * mpc
snake_case_ : List[Any] = big_a * (pmc - aaa)
snake_case_ : Optional[Any] = ppmc + aaa
snake_case_ : List[Any] = -2 * pmpc
snake_case_ : Optional[Any] = ppmc - aaa
snake_case_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : List[str] = tau * frequency / samplerate
snake_case_ : List[str] = sin(__UpperCamelCase )
snake_case_ : List[str] = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : Optional[int] = 1_0 ** (gain_db / 4_0)
snake_case_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Dict = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : int = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : Tuple = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : str = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : str = big_a * (ppmc + aaa)
snake_case_ : List[Any] = -2 * big_a * pmpc
snake_case_ : Union[str, Any] = big_a * (ppmc - aaa)
snake_case_ : Any = pmc + aaa
snake_case_ : Dict = 2 * mpc
snake_case_ : List[Any] = pmc - aaa
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
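Every factory above hands its coefficient lists to a second-order IIRFilter. As a rough sketch of what such a filter does with them, here is a plain direct-form-I biquad (an assumed structure for illustration, not the actual audio_filters.iir_filter code):

def biquad_process(a_coeffs, b_coeffs, samples):
    # y[n] = (b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]) / a0
    a0, a1, a2 = a_coeffs
    b0, b1, b2 = b_coeffs
    x1 = x2 = y1 = y2 = 0.0
    out = []
    for x0 in samples:
        y0 = (b0 * x0 + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2) / a0
        x2, x1 = x1, x0
        y2, y1 = y1, y0
        out.append(y0)
    return out

print(biquad_process([1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 2.0]))  # identity filter: [1.0, 2.0]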
| 58 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
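The recursive power above performs one multiplication per unit of the exponent. A hedged alternative sketch using exponentiation by squaring, which needs only O(log exponent) multiplications (the name fast_power is made up):

def fast_power(base: int, exponent: int) -> float:
    # Iterative exponentiation by squaring; negative exponents are
    # handled by inverting at the end, as the driver code above does.
    result, b, e = 1.0, base, abs(exponent)
    while e:
        if e & 1:
            result *= b
        b *= b
        e >>= 1
    return 1 / result if exponent < 0 else result

assert fast_power(3, 4) == 81
assert fast_power(2, -3) == 0.125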
| 56 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
lowerCamelCase__: Any =tempfile.mkdtemp()
lowerCamelCase__: Any =8
# DPR tok
lowerCamelCase__: Tuple =[
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase__: int =os.path.join(self.tmpdirname , "dpr_tokenizer")
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
lowerCamelCase__: Any =os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
lowerCamelCase__: str =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: Tuple =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCamelCase__: Any ={"unk_token": "<unk>"}
lowerCamelCase__: str =os.path.join(self.tmpdirname , "bart_tokenizer")
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
lowerCamelCase__: Dict =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: List[str] =os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
})
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)
return dataset
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.get_dummy_dataset()
lowerCamelCase__: int =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
lowerCamelCase__: int =dataset
lowerCamelCase__: str =RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : bool) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: int =self.get_dummy_dataset()
lowerCamelCase__: int =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , "dataset")
lowerCamelCase__: Tuple =os.path.join(self.tmpdirname , "index.faiss")
dataset.get_index("embeddings").save(os.path.join(self.tmpdirname , "index.faiss"))
dataset.drop_index("embeddings")
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset"))
del dataset
lowerCamelCase__: Dict =RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCamelCase__: Tuple =RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_) , )
return retriever
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
})
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT)
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr")
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb"))
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl")
lowerCamelCase__: str ={sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , "wb"))
lowerCamelCase__: Dict =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowerCamelCase__: Any =RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer())
return retriever
def SCREAMING_SNAKE_CASE_ (self : int) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =1
lowerCamelCase__: List[Any] =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase__: Union[str, Any] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
lowerCamelCase__: str =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
lowerCamelCase__: List[Any] =self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[str] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =1
lowerCamelCase__: Optional[Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Any =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =1
lowerCamelCase__: Dict =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
lowerCamelCase__: str =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["embeddings", "id", "text", "title"])
self.assertEqual(len(doc_dicts[0]["id"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["id"][0] , "1") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[str] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[Any] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: str =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Any =1
lowerCamelCase__: Optional[int] =self.get_dummy_legacy_index_retriever()
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertEqual(len(UpperCAmelCase_) , 2)
self.assertEqual(sorted(doc_dicts[0]) , ["text", "title"])
self.assertEqual(len(doc_dicts[0]["text"]) , UpperCAmelCase_)
self.assertEqual(doc_dicts[0]["text"][0] , "bar") # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo") # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]])
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =RagRetriever.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: int =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Tuple =retriever.retrieve(UpperCAmelCase_ , n_docs=1)
self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
'''simple docstring'''
import torch
lowerCamelCase__: Union[str, Any] =1
lowerCamelCase__: Optional[Any] =self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase__: str =[[5, 7], [10, 11]]
lowerCamelCase__: List[str] =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: Optional[int] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =(
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , np.ndarray)
lowerCamelCase__: Optional[Any] =retriever(
UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors="pt" , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size))
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any =self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase__: Dict =1
lowerCamelCase__: Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_)
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_)
lowerCamelCase__: List[Any] =[[5, 7], [10, 11]]
lowerCamelCase__: Any =np.array(
[np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)] , dtype=np.floataa)
lowerCamelCase__: List[Any] =retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_)
self.assertEqual(
len(UpperCAmelCase_) , 6) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")) , UpperCAmelCase_) # check for doc token related keys in dictionary.
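The dummy retrievers above all search a flat inner-product faiss index over the embeddings column. A minimal sketch of the same maximum-inner-product lookup done directly with faiss (assumes faiss is installed; the vectors mirror the np.ones / 2 * np.ones dummies used in the tests):

import faiss
import numpy as np

d = 8  # retrieval_vector_size in the tests above
vectors = np.stack([np.ones(d), 2 * np.ones(d)]).astype("float32")
index = faiss.IndexFlatIP(d)  # exact ("Flat") inner-product index
index.add(vectors)

queries = np.stack([np.ones(d), -np.ones(d)]).astype("float32")
scores, doc_ids = index.search(queries, 1)
print(doc_ids.tolist())  # [[1], [0]], matching the assertions above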
| 59 |
'''simple docstring'''
import math
from collections.abc import Callable
def _a (lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ) -> float:
"""simple docstring"""
__snake_case = xa
__snake_case = xa
while True:
if x_n == x_na or function(lowercase__ ) == function(lowercase__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__snake_case = x_na - (
function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 1_0**-5:
return x_na
__snake_case = x_na
__snake_case = x_na
def _a (lowercase__ : float ) -> float:
"""simple docstring"""
return math.pow(lowercase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
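One secant step by hand, starting from the same points the demo passes in (x0 = 3, x1 = 3.5), shows how the update x2 = x1 - f(x1) / ((f(x1) - f(x0)) / (x1 - x0)) moves toward the root of x**3 - 2*x - 5 near 2.0946:

def g(x):
    return x**3 - 2 * x - 5

x0, x1 = 3.0, 3.5
x2 = x1 - g(x1) / ((g(x1) - g(x0)) / (x1 - x0))
print(round(x2, 4))  # 2.4622, already closer to the root near 2.0946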
| 56 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase_ = 3
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
snake_case_ : Any = random.randrange(3 , _UpperCamelCase )
if pow(_UpperCamelCase , 2 , _UpperCamelCase ) == 1:
continue
if pow(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) == 1:
continue
return g
def lowerCamelCase_ ( _UpperCamelCase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print('''Generating prime p...''' )
snake_case_ : Optional[int] = rabin_miller.generate_large_prime(_UpperCamelCase ) # select large prime number.
snake_case_ : str = primitive_root(_UpperCamelCase ) # one primitive root on modulo p.
snake_case_ : Union[str, Any] = random.randrange(3 , _UpperCamelCase ) # private_key -> have to be greater than 2 for safety.
snake_case_ : Optional[int] = cryptomath.find_mod_inverse(pow(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
snake_case_ : Dict = (key_size, e_a, e_a, p)
snake_case_ : Any = (key_size, d)
return public_key, private_key
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
snake_case_ , snake_case_ : str = generate_key(_UpperCamelCase )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , '''w''' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
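The script above only writes key files; it never encrypts anything. A toy round trip with tiny hand-picked numbers (deliberately insecure and purely illustrative; real keys use the 2048-bit primes generated above):

p, g = 467, 2          # small prime and base, illustrative only
d = 127                # private key
h = pow(g, d, p)       # public component g^d mod p

m, k = 100, 213        # message and ephemeral key
c1 = pow(g, k, p)
c2 = (m * pow(h, k, p)) % p

# c1^(p-1-d) is the modular inverse of c1^d, by Fermat's little theorem
recovered = (c2 * pow(c1, p - 1 - d, p)) % p
assert recovered == m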
| 60 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
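A stripped-down sketch of the vocab lookup the setUp above exercises (this is not the real CpmAntTokenizer, just the token-to-id mapping it builds on; the tokens are a subset of the test vocab):

vocab_tokens = ["<unk>", "我", "是", "C", "P", "M"]
vocab = {tok: idx for idx, tok in enumerate(vocab_tokens)}

def convert_tokens_to_ids(tokens):
    # unknown tokens fall back to the <unk> id, as real tokenizers do
    return [vocab.get(tok, vocab["<unk>"]) for tok in tokens]

print(convert_tokens_to_ids(["我", "是", "C", "P", "M", "?"]))  # [1, 2, 3, 4, 5, 0]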
| 56 | 0 |
import os
# Precomputes a list of the first 100 triangular numbers
UpperCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCAmelCase_ ) )
lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , "words.txt" )
lowerCAmelCase__ = ""
with open(lowerCAmelCase_ ) as f:
lowerCAmelCase__ = f.readline()
lowerCAmelCase__ = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
lowerCAmelCase__ = [
word
for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution())
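A worked check of the scoring rule, independent of the word list:

word = "SKY"
value = sum(ord(ch) - 64 for ch in word)  # 'A' -> 1, ..., 'Z' -> 26
assert value == 19 + 11 + 25 == 55
assert value in [n * (n + 1) // 2 for n in range(1, 101)]  # 55 is the 10th triangular number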
| 61 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
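A worked trace of one expression, plus a standalone check of the truncation rule used in the division branch:

# ["2", "3", "1", "*", "+", "9", "-"]: push 2, 3, 1; "*" leaves [2, 3];
# "+" leaves [5]; push 9; "-" leaves [5 - 9] = [-4].
# The division branch truncates toward zero for mixed-sign operands:
a, b = -7, 2
quotient = a // b + 1 if a * b < 0 and a % b != 0 else a // b
assert quotient == -3  # plain floor division would give -7 // 2 == -4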
| 56 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = emb.weight.shape
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(lowercase , lowercase , bias=lowercase )
SCREAMING_SNAKE_CASE : Any = emb.weight.data
return lin_layer
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(lowercase , map_location="cpu" )
SCREAMING_SNAKE_CASE : Optional[int] = Namespace(**checkpoint["cfg"]["model"] )
SCREAMING_SNAKE_CASE : str = checkpoint["model"]
remove_ignore_keys_(lowercase )
SCREAMING_SNAKE_CASE : str = state_dict["decoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE : Any = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
SCREAMING_SNAKE_CASE : List[str] = XGLMConfig(
vocab_size=lowercase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
SCREAMING_SNAKE_CASE : Tuple = XGLMForCausalLM(lowercase )
SCREAMING_SNAKE_CASE : List[Any] = model.load_state_dict(lowercase , strict=lowercase )
print(lowercase )
SCREAMING_SNAKE_CASE : int = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
snake_case = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
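The make_linear_from_emb helper ties the output projection to the token embeddings. A tiny sketch of that weight tying (shapes are illustrative):

import torch
from torch import nn

emb = nn.Embedding(10, 4)                 # 10 tokens, hidden size 4
lin = nn.Linear(4, 10, bias=False)        # bias-free output projection
lin.weight.data = emb.weight.data         # share one weight matrix

hidden = emb(torch.tensor([3]))           # [1, 4]
logits = lin(hidden)                      # [1, 10]
# the logit for token 3 is the squared norm of its own embedding
assert torch.allclose(logits[0, 3], (emb.weight[3] * emb.weight[3]).sum())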
| 62 |
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase__ : int , lowercase__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case = update_area_of_max_square(lowercase__ , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
return sub_problem_sol
else:
return 0
__snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case = update_area_of_max_square_using_dp_array(lowercase__ , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , lowercase__ , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
__snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case = [0]
__snake_case = [[-1] * cols for _ in range(lowercase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase__ )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [[0] * (cols + 1) for _ in range(rows + 1 )]
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = dp_array[row][col + 1]
__snake_case = dp_array[row + 1][col + 1]
__snake_case = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(dp_array[row][col] , lowercase__ )
else:
__snake_case = 0
return largest_square_area
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [0] * (cols + 1)
__snake_case = [0] * (cols + 1)
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = current_row[col + 1]
__snake_case = next_row[col + 1]
__snake_case = next_row[col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(current_row[col] , lowercase__ )
else:
__snake_case = 0
__snake_case = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
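A self-contained reference check of the recurrence all four variants implement (the helper name largest_square_side is made up):

def largest_square_side(mat):
    # dp[r][c] = 1 + min(right, down, diagonal) wherever mat[r][c] == 1
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if mat[r][c]:
                dp[r][c] = 1 + min(dp[r][c + 1], dp[r + 1][c], dp[r + 1][c + 1])
                best = max(best, dp[r][c])
    return best

assert largest_square_side([[1, 1], [1, 1]]) == 2
assert largest_square_side([[1, 0, 1], [1, 1, 1], [0, 1, 1]]) == 2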
| 56 | 0 |
from __future__ import annotations
import requests
a : List[Any] = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int = 1 , __lowerCamelCase : str = "new" , __lowerCamelCase : list | None = None ):
__UpperCAmelCase : int = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowerCamelCase ) - valid_terms ) ):
__UpperCAmelCase : List[Any] = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowerCamelCase )
__UpperCAmelCase : int = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 429:
raise requests.HTTPError
__UpperCAmelCase : List[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowerCamelCase )}
__UpperCAmelCase : Tuple = {}
for id_ in range(__lowerCamelCase ):
__UpperCAmelCase : str = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
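The function deliberately raises on HTTP 429 (rate limiting). A hedged sketch of one way a caller might retry (the wrapper and its parameters are illustrative, not part of the module):

import time
import requests

def get_with_retry(fetch, retries=3, wait_seconds=5):
    # fetch is any zero-argument callable that may raise requests.HTTPError
    for attempt in range(retries):
        try:
            return fetch()
        except requests.HTTPError:
            if attempt == retries - 1:
                raise
            time.sleep(wait_seconds)  # fixed backoff before the next attempt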
| 63 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 0 |
def A__ ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return [tuple(snake_case_ )]
SCREAMING_SNAKE_CASE__: str= []
def generate(snake_case_ : int , snake_case_ : list ):
SCREAMING_SNAKE_CASE__: int= [0] * n
res.append(tuple(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Tuple= 0
while i < n:
if c[i] < i:
if i % 2 == 0:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= arr[i], arr[0]
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= arr[i], arr[c[i]]
res.append(tuple(snake_case_ ) )
c[i] += 1
SCREAMING_SNAKE_CASE__: Dict= 0
else:
SCREAMING_SNAKE_CASE__: Any= 0
i += 1
generate(len(snake_case_ ) , snake_case_ )
return res
if __name__ == "__main__":
lowercase_ : Any = input('Enter numbers separated by a comma:\n').strip()
lowercase_ : Optional[int] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
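A reference sketch of the same iterative Heap's algorithm under a hypothetical name, cross-checked against the standard library:

from itertools import permutations

def heaps_ref(arr):
    # Heap's algorithm: one swap per emitted permutation
    arr = list(arr)
    n = len(arr)
    c = [0] * n
    res = [tuple(arr)]
    i = 0
    while i < n:
        if c[i] < i:
            j = 0 if i % 2 == 0 else c[i]
            arr[j], arr[i] = arr[i], arr[j]
            res.append(tuple(arr))
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
    return res

assert sorted(heaps_ref([1, 2, 3])) == sorted(permutations([1, 2, 3]))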
| 64 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
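# Minimal usage sketch (standard transformers API; not part of the original file):
#   from transformers import CamembertConfig, CamembertModel
#   model = CamembertModel(CamembertConfig())   # randomly initialised, camembert-base sized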
| 56 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
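# Hedged usage sketch (standard pipeline factory; the checkpoint name is just an
# example, any encoder checkpoint works):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="bert-base-uncased")
#   features = extractor("This is a test.")   # nested list, roughly [1, seq_len, hidden_size]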
| 65 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 56 | 0 |
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
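# Illustrative round-trip (the key values are an assumption; any integer key
# whose determinant is coprime with 36 passes check_determinant):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))   # det = 7, gcd(7, 36) = 1
#   hc.decrypt(hc.encrypt("HELLO"))   # should equal hc.process_text("HELLO"), i.e. "HELLOO"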
| 66 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the payloads of two nodes; the links stay untouched
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
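# Expected output of the demo above (swap_nodes exchanges data, not links):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5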
| 56 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # this helper's original name was anonymised in the dump; it rounds n down
    # to the nearest multiple of d
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
"""simple docstring"""
def __init__( self : str ,__A : AutoencoderKL ,__A : CLIPTextModel ,__A : CLIPTokenizer ,__A : UNetaDConditionModel ,__A : DDPMScheduler ,__A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,__A : int = 350 ,) -> Union[str, Any]:
super().__init__(
vae=__A ,text_encoder=__A ,tokenizer=__A ,unet=__A ,low_res_scheduler=__A ,scheduler=__A ,max_noise_level=__A ,)
def __UpperCAmelCase ( self : Dict ,__A : Dict ,__A : Tuple ,__A : List[Any] ,__A : str ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : Optional[int] ,**__A : List[Any] ) -> int:
torch.manual_seed(0 )
_lowercase = (
min(image.size[0] - (tile_size + original_image_slice) ,x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) ,y * tile_size ),
min(image.size[0] ,(x + 1) * tile_size ),
min(image.size[1] ,(y + 1) * tile_size ),
)
_lowercase = add_overlap_rect(__A ,__A ,image.size )
_lowercase = image.crop(__A )
_lowercase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_lowercase = translated_slice_x - (original_image_slice / 2)
_lowercase = max(0 ,__A )
_lowercase = squeeze_tile(__A ,__A ,__A ,__A )
_lowercase = to_input.size
_lowercase = to_input.resize((tile_size, tile_size) ,Image.BICUBIC )
_lowercase = super(__A ,self ).__call__(image=__A ,**__A ).images[0]
_lowercase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) ,Image.BICUBIC )
_lowercase = unsqueeze_tile(__A ,__A )
_lowercase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) ,Image.BICUBIC )
_lowercase = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
_lowercase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) ,tile_border * 4 ,remove_borders=__A ) ,mode='L' ,)
final_image.paste(
__A ,(crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) ,__A )
@torch.no_grad()
def __call__( self : List[Any] ,__A : Union[str, List[str]] ,__A : Union[PIL.Image.Image, List[PIL.Image.Image]] ,__A : int = 75 ,__A : float = 9.0 ,__A : int = 50 ,__A : Optional[Union[str, List[str]]] = None ,__A : Optional[int] = 1 ,__A : float = 0.0 ,__A : Optional[torch.Generator] = None ,__A : Optional[torch.FloatTensor] = None ,__A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,__A : int = 1 ,__A : int = 128 ,__A : int = 32 ,__A : int = 32 ,) -> int:
_lowercase = Image.new('RGB' ,(image.size[0] * 4, image.size[1] * 4) )
_lowercase = math.ceil(image.size[0] / tile_size )
_lowercase = math.ceil(image.size[1] / tile_size )
_lowercase = tcx * tcy
_lowercase = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A ,__A ,__A ,__A ,__A ,__A ,__A ,prompt=__A ,num_inference_steps=__A ,guidance_scale=__A ,noise_level=__A ,negative_prompt=__A ,num_images_per_prompt=__A ,eta=__A ,generator=__A ,latents=__A ,)
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main():
    # Run a demo (fp16 weights are assumed, matching the revision below)
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 67 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
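# Explanatory note (not in the original file): _LazyModule swaps itself into
# sys.modules, so the torch-backed modeling files above are imported only on
# first attribute access, e.g.
#   from transformers.models.informer import InformerModel   # import happens here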
| 68 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Union[str, Any] , a_ : Optional[Any] , a_ : Dict=13 , a_ : List[Any]=7 , a_ : int=True , a_ : List[str]=True , a_ : List[Any]=True , a_ : Union[str, Any]=True , a_ : Tuple=99 , a_ : List[Any]=32 , a_ : str=5 , a_ : Optional[int]=4 , a_ : int=37 , a_ : Optional[Any]="gelu" , a_ : Union[str, Any]=0.1 , a_ : List[Any]=0.1 , a_ : Dict=512 , a_ : Optional[int]=16 , a_ : Dict=2 , a_ : str=0.02 , a_ : List[Any]=False , a_ : Optional[Any]=True , a_ : Optional[Any]="None" , a_ : List[Any]=3 , a_ : Tuple=4 , a_ : int=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = relative_attention
__snake_case = position_biased_input
__snake_case = pos_att_type
__snake_case = scope
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : str ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.get_config()
__snake_case = 300
return config
def A ( self : Union[str, Any] , a_ : Any ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def A ( self : Union[str, Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : Tuple , a_ : List[Any] , a_ : Optional[Any] , a_ : Tuple , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , token_type_ids=a_ )[0]
__snake_case = model(a_ , token_type_ids=a_ )[0]
__snake_case = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def A ( self : Optional[int] , a_ : List[Any] , a_ : Tuple , a_ : Tuple , a_ : List[Any] , a_ : Optional[int] , a_ : Any , a_ : Tuple ):
"""simple docstring"""
__snake_case = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , a_ : List[str] , a_ : int , a_ : Optional[Any] , a_ : List[Any] , a_ : Optional[Any] , a_ : List[Any] , a_ : Any ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def A ( self : Optional[Any] , a_ : str , a_ : Tuple , a_ : Any , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : str , a_ : List[Any] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Union[str, Any] , a_ : str , a_ : List[str] , a_ : Tuple , a_ : str , a_ : str , a_ : List[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Dict ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = DebertaModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def A ( self : List[str] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def A ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
__snake_case = DebertaModel.from_pretrained("microsoft/deberta-base" )
__snake_case = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case = model(a_ , attention_mask=a_ )[0]
# compare the actual values for a slice.
__snake_case = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 69 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
def method_a(boundary, steps):
    # extended trapezoidal rule: int(f) ~= h * (f(a)/2 + f(x_1) + ... + f(b)/2)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
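# Worked check: for f(x) = x**2 on [0, 1] with 10 steps the exact integral is
# 1/3 ~= 0.3333, and the printed trapezoidal estimate is ~= 0.335; the error
# shrinks as O(h**2) as the step count grows.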
| 70 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the distinct prime products over all prime partitions of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than the requested number of prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
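# Sanity check (the classic Project Euler 77 example): 10 is a sum of primes in
# exactly five ways, and the prime-product encoding keeps those multisets
# distinct, so len(partition(10)) == 5.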
| 56 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: smallest cuboid size M for which the number of cuboids
    with an integer shortest surface path exceeds *limit*.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
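# Orientation (stated, not computed inline, since the loop is slow at this
# limit): for limit = 1_000_000 the count first exceeds the limit at M = 1818,
# the accepted Project Euler 86 answer.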
| 71 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
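# Equivalent CLI usage (standard accelerate entry point):
#   $ accelerate config                         # interactive prompts
#   $ accelerate config --config_file my.yaml   # write to an explicit path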
| 56 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    '''
    Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    '''
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
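# Worked example: with L = 10 mH and C = 100 uF,
#   resonant_frequency(0.01, 0.0001)
# returns ("Resonant frequency", ~159.155), since sqrt(L * C) = 1e-3 and
# 1 / (2 * pi * 1e-3) ~= 159.155 Hz.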
| 72 |
'''simple docstring'''
from __future__ import annotations
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 1')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # No decomposition as prime + 2 * square was found.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
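# For reference, the script above should print Project Euler's published answer,
# the smallest odd composite that is not a prime plus twice a square:
# solution() = 5777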
| 56 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 73 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """simple docstring"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
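# For example, splitting a 100-byte file across 4 partitions yields contiguous,
# non-overlapping, 1-indexed byte ranges:
#
#     print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']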
| 56 | 0 |
from ..utils import DummyObject, requires_backends
# In transformers, one dummy class like this exists for every sentencepiece-backed
# object; the repeated, identically named definitions collapse into this single one.
class SentencePieceDummyObject(metaclass=DummyObject):
    """simple docstring"""

    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ['sentencepiece'])
| 74 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of unknown length that stops at a random point, to exercise edge cases.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
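# Outside the test suite, the same helpers are handy for resuming a run mid-epoch;
# a minimal sketch using the imports at the top of this file (the expected output
# mirrors the assertions above):
#
#     loader = DataLoader(list(range(16)), batch_size=4)
#     resumed = skip_first_batches(loader, num_batches=2)
#     print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]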
| 56 | 0 |
'''simple docstring'''
from manim import *
class CheckpointLoadingScene(Scene):  # illustrative name; the original class name is not recoverable
    def construct(self):
'''simple docstring'''
UpperCAmelCase__ : Any = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ : Optional[int] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCAmelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Any = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Optional[int] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Optional[Any] = VGroup(_A , _A ).arrange(_A , buff=0 )
UpperCAmelCase__ : List[str] = Text('''CPU''' , font_size=24 )
UpperCAmelCase__ : Optional[int] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_A )
UpperCAmelCase__ : Union[str, Any] = [mem.copy() for i in range(4 )]
UpperCAmelCase__ : List[str] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : Dict = Text('''GPU''' , font_size=24 )
UpperCAmelCase__ : str = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
gpu.move_to([-1, -1, 0] )
self.add(_A )
UpperCAmelCase__ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : Optional[Any] = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : int = Text('''Model''' , font_size=24 )
UpperCAmelCase__ : Union[str, Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
model.move_to([3, -1.0, 0] )
self.add(_A )
UpperCAmelCase__ : List[str] = []
for i, rect in enumerate(_A ):
rect.set_stroke(_A )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase__ : str = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_A )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_A , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_A , buff=0.0 )
self.add(_A )
cpu_targs.append(_A )
UpperCAmelCase__ : str = [mem.copy() for i in range(6 )]
UpperCAmelCase__ : int = VGroup(*_A ).arrange(_A , buff=0 )
UpperCAmelCase__ : List[str] = Text('''Loaded Checkpoint''' , font_size=24 )
UpperCAmelCase__ : Optional[int] = Group(_A , _A ).arrange(_A , aligned_edge=_A , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase__ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_A , _A )
UpperCAmelCase__ : List[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase__ : Optional[int] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_A ) , Write(_A ) )
self.play(Write(_A , run_time=1 ) , Create(_A , run_time=1 ) )
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Dict = []
for i, rect in enumerate(_A ):
UpperCAmelCase__ : int = fill.copy().set_fill(_A , opacity=0.7 )
target.move_to(_A )
first_animations.append(GrowFromCenter(_A , run_time=1 ) )
UpperCAmelCase__ : Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_A , run_time=1.5 ) )
self.play(*_A )
self.play(*_A )
self.wait()
| 75 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir("fixtures/test_sentencepiece.model")
_a : Dict = {"target_lang": "fi", "source_lang": "en"}
_a : Optional[int] = ">>zh<<"
_a : List[str] = "Helsinki-NLP/"
if is_torch_available():
_a : List[str] = "pt"
elif is_tf_available():
_a : Dict = "tf"
else:
_a : Union[str, Any] = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab'])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['source_spm'])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['target_spm'])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case,  # the dict assigned in the fmt: off block above
            model_name='Helsinki-NLP/opus-mt-en-de',
            revision='1a8c2263da11e68e50938f97e10cd57820bd504c',
            decode_kwargs={'use_source_tokenizer': True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')

        source_text = 'Tämä on testi'
        target_text = 'This is a test'

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
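# A minimal usage sketch of the tokenizer under test (requires network access to the
# Hugging Face Hub; the checkpoint and expected ids come from the equivalence test above):
#
#     tok = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de')
#     print(tok(['I am a small frog']).input_ids[0])  # [38, 121, 14, 697, 38848, 0]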
| 56 | 0 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
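# A quick worked check of the activation above (alpha = 0.3; negative inputs
# saturate toward -alpha):
#
#     print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3))
#     # approximately [ 2.3  0.6  -0.2593994  -0.2932888]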
| 76 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """simple docstring"""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += '1' if c == '0' else '0'
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """simple docstring"""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
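# A quick sanity check against well-known MD5 digests, runnable separately from the
# doctests above:
#
#     assert md5_me(b'') == b'd41d8cd98f00b204e9800998ecf8427e'
#     assert md5_me(b'The quick brown fox jumps over the lazy dog') == b'9e107d9d372bb6826bd81d3542a419d6'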
| 56 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
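# A minimal usage sketch (downloads the bert-base-uncased files from the Hub on
# first use):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     print(tokenizer("Hello world!")["input_ids"])  # begins with [CLS] id 101, ends with [SEP] id 102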
| 77 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
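# For example (the repository and file names here are purely illustrative):
#
#     url = hf_hub_url('squad', 'dataset_infos.json', revision='main')
#     print(url)  # an https://huggingface.co/datasets/... URL pointing at that file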
| 56 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
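# Usage sketch: plain-English language names are mapped through LANGUAGE_CODES to
# NLLB codes before generation.
# translator = TranslationTool()
# translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")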
| 78 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return one representative linear layer from the first transformer block."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a trainable low-rank adapter (for testing only)."""

    def __init__(self, module: nn.Module, rank: int) -> None:
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
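# Usage sketch: wrap an existing projection so only the low-rank adapter trains.
# proj = LoRALayer(nn.Linear(768, 768), rank=16)  # forward = frozen base + adapter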
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"
    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self) -> None:
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self) -> None:
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='auto')
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')

    def tearDown(self) -> None:
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self) -> None:
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, 'quantization_config'))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self) -> None:
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self) -> None:
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()
        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self) -> None:
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        output_sequences = self.model_4bit.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self) -> None:
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self) -> None:
        # saving a 4-bit model is expected to fail (NotImplementedError assumed here)
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self) -> None:
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=bnb_config, load_in_4bit=True,
                device_map='auto', bnb_4bit_quant_type='nf4')
    def test_device_and_dtype_assignment(self) -> None:
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to('cpu')
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device('cuda:0'))
        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            # Tries with a cast
            self.model_4bit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to('cpu')
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self) -> None:
        model = AutoModelForSeq2SeqLM.from_pretrained('t5-small', load_in_4bit=True, device_map='auto')
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls.model_name = 't5-small'
        cls.dense_act_model_name = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = 'Translate in German: Hello, my dog is cute'

    def tearDown(self) -> None:
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self) -> None:
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self) -> None:
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map='auto')
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt').to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self) -> None:
        super().setUp()
        # model_name
        self.model_name = 'bigscience/bloom-560m'
        self.seq_to_seq_name = 't5-small'
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='auto')
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map='auto')

    def tearDown(self) -> None:
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self) -> None:
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self) -> None:
        super().setUp()

    def tearDown(self) -> None:
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self) -> None:
        self.pipe = pipeline(
            'text-generation',
            model=self.model_name,
            model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self) -> None:
        super().setUp()

    def test_multi_gpu_loading(self) -> None:
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map='balanced')
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors='pt')
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self) -> None:
        self.model_name = 'facebook/opt-350m'
        super().setUp()

    def test_training(self) -> None:
        if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer('Test batch ', return_tensors='pt').to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
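# For reference, a typical 4-bit NF4 setup mirroring these tests (values illustrative):
# quantization_config = BitsAndBytesConfig(
#     load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16
# )
# model = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
# )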
| 56 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator: run `func` once and return how long it took, in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """Generate `num_examples` rows of random dummy data matching `features`."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write `num_examples` dummy rows to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
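# Usage sketch: time how long writing a small dummy dataset takes with the helpers above.
if __name__ == "__main__":

    @get_duration
    def write_small_dataset(path):
        feats = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
        generate_example_dataset(path, feats, num_examples=50)

    print(f"wrote 50 examples in {write_small_dataset('./dummy_bench.arrow'):.4f}s")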
| 79 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self) -> None:
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_6000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(feature_extractor_map) + '\n')
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self) -> None:
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self) -> None:
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
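# For reference, the decode path exercised above reduces to (sketch; names follow
# this file's conventions):
# processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
# logits = model(input_values).logits.cpu().numpy()[0]  # shape (time, vocab_size)
# transcription = processor.decode(logits, output_word_offsets=True).text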
| 56 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self) -> None:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def _a ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
    def test_forward_signature(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self) -> None:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self) -> None:
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self) -> None:
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = ConvNextModelTester(self)
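# Quick end-to-end sanity check mirroring the integration test above (sketch):
# from transformers import pipeline
# classifier = pipeline("image-classification", model="facebook/convnext-tiny-224")
# print(classifier(prepare_img())[0])  # top label for the COCO cats fixture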
| 80 |
'''simple docstring'''
def power(base: int, exponent: int) -> float:
    """Compute base**exponent for non-negative exponents, recursively."""
    return base * power(base, (exponent - 1)) if exponent else 1
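# Examples: power(2, 3) == 8 and power(5, 0) == 1. Negative exponents are handled
# below in the __main__ block via 1 / power(base, abs(exponent)).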
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self) -> None:
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self) -> None:
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self) -> None:
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self) -> None:
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self) -> None:
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self) -> None:
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self) -> None:
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self) -> None:
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self) -> None:
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self) -> None:
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self) -> None:
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 81 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
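    # A minimal sanity check (an assumed addition, not in the original file): the
    # real root of x**3 - 2*x - 5 is about 2.0945515, so any result within the
    # 1e-5 stopping tolerance must make f(root) very close to zero.
    root = intersection(f, 3, 3.5)
    assert abs(f(root)) < 1e-3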
| 56 | 0 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
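    # Illustrative check (values verifiable by hand, not part of the original file):
    # the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.
    assert [ugly_numbers(k) for k in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]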
| 82 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 56 | 0 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors
            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
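# Illustrative note (not part of the original file): `transformers-cli env` is the
# typical entry point; argparse dispatches it to EnvironmentCommand.run() via the
# `func` default registered in register_subcommand above.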
| 83 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
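    # Illustrative usage (an assumed example, not from the original file):
    # "2 3 + 4 *" in postfix is (2 + 3) * 4, which evaluates to 20.
    assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20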
| 56 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 84 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore every cell and track the best square side seen so far."""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursion plus memoization: cache each sub-problem in dp_array."""
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP keeping only the current and the next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
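    # Cross-check (an assumed example, not from the original file): all four variants
    # should agree on the side length of the largest all-ones square.
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down_approach(3, 3, sample)
        == largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample)
        == 2
    )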
| 56 | 0 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    """Return True if the number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
F"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
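    # Known factorions (an assumed check, not from the original file): 1, 2, 145
    # and 40585 are the only base-10 Krishnamurthy numbers.
    assert all(krishnamurthy(n) for n in (1, 2, 145, 40585))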
| 85 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 86 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
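# Illustrative usage (an assumption, not part of the original file):
#   config = CamembertConfig()  # RoBERTa-style defaults shown above
#   onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#   list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]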
| 56 | 0 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply ELU: x where x > 0, alpha * (exp(x) - 1) elsewhere."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
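    # Illustrative usage (an assumed example, not from the original file): positive
    # inputs pass through unchanged, negatives are squashed toward -alpha.
    print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))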
| 87 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
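# Illustrative usage (an assumption, not part of the original file):
#   TimesformerConfig() reproduces the facebook/timesformer defaults above, while
#   TimesformerConfig(num_frames=16, attention_type="joint_space_time") overrides
#   only the clip length and the attention variant.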
| 56 | 0 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 88 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None
class LinkedList:
    def __init__(self) -> None:
        self.head = None
    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
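# Lazy import structure: the heavy torch/TF modeling modules are only imported on first access.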
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly , x ) -> float:
    # Naive evaluation: compute each power of x independently.
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly , x ) -> float:
    # Horner's rule: fold from the highest coefficient down, using one
    # multiplication and one addition per coefficient.
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 90 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
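# Fast unit tests for the AutoencoderKL VAE, plus slow regression tests against reference output slices.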
class AutoencoderKLTests(ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
    def dummy_input( self : List[str] ) -> Optional[int]:
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape( self : List[Any] ) -> List[Any]:
        return (3, 32, 32)
@property
    def output_shape( self : int ) -> int:
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self : Tuple ) -> Union[str, Any]:
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self : Optional[Any] ) -> Any:
        pass
    def test_training( self : Tuple ) -> List[Any]:
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing( self : List[str] ) -> int:
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self : int ) -> int:
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self : Optional[int] ) -> List[str]:
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
            expected_output_slice = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase ):
    def get_file_format( self : Any , seed : List[Any] , shape : Any ) -> Union[str, Any]:
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
    def tearDown( self : Optional[Any] ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image( self : int , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ) -> int:
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self : Optional[Any] , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ) -> List[str]:
        revision = 'fp16' if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='vae' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self : List[str] , seed=0 ) -> Union[str, Any]:
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion( self : Dict , seed : Dict , expected_slice : Dict , expected_slice_mps : Optional[int] ) -> Optional[Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self : Optional[int] , seed : Union[str, Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode( self : str , seed : int , expected_slice : Union[str, Any] , expected_slice_mps : Dict ) -> List[Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self : Optional[Any] , seed : int , expected_slice : Union[str, Any] ) -> int:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self : int , seed : List[Any] , expected_slice : List[Any] ) -> str:
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self : Any , seed : int ) -> Tuple:
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self : List[Any] , seed : int ) -> str:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self : int , seed : Optional[Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
| 56 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
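# Shared utilities for token classification (NER): example/feature containers and PyTorch / TF dataset wrappers.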
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    '''simple docstring'''
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum ):
    '''simple docstring'''
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
'''simple docstring'''
@staticmethod
    def read_examples_from_file( data_dir ,mode: Union[Split, str] ) -> List[InputExample]:
        raise NotImplementedError
@staticmethod
    def get_labels( path: str ) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features( examples: List[InputExample] ,label_list: List[str] ,max_seq_length: int ,tokenizer: PreTrainedTokenizer ,cls_token_at_end=False ,cls_token="[CLS]" ,cls_token_segment_id=1 ,sep_token="[SEP]" ,sep_token_extra=False ,pad_on_left=False ,pad_token=0 ,pad_token_segment_id=0 ,pad_token_label_id=-100 ,sequence_a_segment_id=0 ,mask_padding_with_zero=True ,) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_0000 == 0:
                logger.info('Writing example %d of %d' ,ex_index ,len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words ,example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' ,example.guid )
                logger.info('tokens: %s' ,' '.join([str(x ) for x in tokens] ) )
                logger.info('input_ids: %s' ,' '.join([str(x ) for x in input_ids] ) )
                logger.info('input_mask: %s' ,' '.join([str(x ) for x in input_mask] ) )
                logger.info('segment_ids: %s' ,' '.join([str(x ) for x in segment_ids] ) )
                logger.info('label_ids: %s' ,' '.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
features.append(
InputFeatures(
input_ids=A_ ,attention_mask=A_ ,token_type_ids=A_ ,label_ids=A_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset ):
        '''simple docstring'''
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__( self : Tuple ,token_classification_task: TokenClassificationTask ,data_dir: str ,tokenizer: PreTrainedTokenizer ,labels: List[str] ,model_type: str ,max_seq_length: Optional[int] = None ,overwrite_cache=False ,mode: Split = Split.train ,) -> Union[str, Any]:
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(max_seq_length ) ) ,)
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F'Loading features from cached file {cached_features_file}' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F'Creating features from dataset file at {data_dir}' )
                    examples = token_classification_task.read_examples_from_file(data_dir ,mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples ,labels ,max_seq_length ,tokenizer ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=False ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
                    logger.info(F'Saving features into cached file {cached_features_file}' )
                    torch.save(self.features ,cached_features_file )
def __len__( self : List[str] ) -> str:
return len(self.features )
        def __getitem__( self : List[str] ,i: int ) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        '''simple docstring'''
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__( self : List[Any] ,token_classification_task: TokenClassificationTask ,data_dir: str ,tokenizer: PreTrainedTokenizer ,labels: List[str] ,model_type: str ,max_seq_length: Optional[int] = None ,overwrite_cache=False ,mode: Split = Split.train ,) -> Union[str, Any]:
            examples = token_classification_task.read_examples_from_file(data_dir ,mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples ,labels ,max_seq_length ,tokenizer ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=False ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen ,({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64) ,(
                        {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) ,)
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen ,({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64) ,(
                        {
                            'input_ids': tf.TensorShape([None] ),
                            'attention_mask': tf.TensorShape([None] ),
                            'token_type_ids': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) ,)
        def get_dataset( self : Optional[int] ) -> List[Any]:
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
            return self.dataset
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
        def __getitem__( self : Optional[Any] ,i: Union[str, Any] ) -> InputFeatures:
            return self.features[i]
| 91 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
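# Fast Shap-E pipeline tests built from tiny dummy components, plus a slow end-to-end render check.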
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
    test_gradient_checkpointing = False
@property
    def text_embedder_hidden_size( self : Any ) -> Optional[int]:
return 32
@property
    def time_input_dim( self : List[Any] ) -> List[Any]:
return 32
@property
    def time_embed_dim( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
    def renderer_dim( self : Dict ) -> Union[str, Any]:
return 8
@property
    def dummy_tokenizer( self : List[Any] ) -> Optional[Any]:
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
@property
    def dummy_text_encoder( self : Dict ) -> Any:
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self : str ) -> Dict:
torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self : Tuple ) -> Dict:
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self : Any , device : str , seed : Optional[int]=0 ) -> Union[str, Any]:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e( self : Optional[Any] ) -> str:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self : int ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self : Dict ) -> Any:
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self : Union[str, Any] ) -> str:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self : Optional[int] ) -> Optional[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e( self : Union[str, Any] ) -> Optional[Any]:
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            'a shark' , generator=generator , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 56 | 0 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
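# Fast tests for the ControlNet image-to-image pipeline, including the multi-ControlNet variant.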
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : Optional[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self : str , device : Dict , seed : Dict=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self : int ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self : Any ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineLatentTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self : Union[str, Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m : Optional[Any] ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self : Optional[int] , device : List[str] , seed : Dict=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch( self : str ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
    def test_attention_slicing_forward_pass( self : Tuple ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self : Optional[int] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
    def test_inference_batch_single_identical( self : List[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception( self : List[Any] ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase ):
    def tearDown( self : Optional[int] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self : Optional[Any] ):
        '''simple docstring'''
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9E-2
| 92 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
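# Project Euler 77: find the first number that can be written as a sum of primes in more
# than 5000 ways; each prime partition is encoded as the product of its prime parts.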
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition(number_to_partition: int ) -> set[int]:
    """simple docstring"""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution(number_unique_partitions: int = 5_0_0_0 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
"""simple docstring"""
from PIL import Image
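# Binarize a grayscale image: pixels brighter than the global mean become white (255), the rest black (0).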
def mean_threshold(image: Image ) -> Image:
    """simple docstring"""
    width , height = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean pixel intensity over the whole image.
    for i in range(height ):
        for j in range(width ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for i in range(height ):
        for j in range(width ):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image
if __name__ == "__main__":
__A = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 93 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
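# Interactive questionnaire that writes the default accelerate config file for this machine.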
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input() -> Dict:
    """simple docstring"""
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ) -> int:
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command(args ) -> Union[str, Any]:
    """simple docstring"""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'accelerate configuration saved at {config_file}' )
def main() -> int:
    """simple docstring"""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
| 56 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
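# Donut (VisionEncoderDecoder) based agent tool that answers questions about a document image.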
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool ):
    """simple docstring"""
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self : int , *args , **kwargs ) -> str:
        '''simple docstring'''
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )
    def encode( self : int , document : "Image" , question : str ) -> int:
        '''simple docstring'''
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self : Union[str, Any] , inputs : List[str] ) -> Tuple:
        '''simple docstring'''
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self : List[str] , outputs : List[Any] ) -> str:
        '''simple docstring'''
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 94 |
'''simple docstring'''
from __future__ import annotations
import math
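# Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite that
# cannot be written as the sum of a prime and twice a square.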
def is_prime(number: int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int ) -> list[int]:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            # No prime + 2*i*i decomposition was found, so the conjecture fails here.
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
"""simple docstring"""
import requests
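# Query the Giphy search API and return the URL of every matching GIF.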
giphy_api_key = 'YOUR API KEY'
def get_gifs(query: str ,api_key: str = giphy_api_key ) -> list:
    formatted_query = "+".join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 95 |
'''simple docstring'''
from __future__ import annotations
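# Split a byte range into `partitions` contiguous allocations, e.g. for a segmented download.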
def allocation_num(number_of_bytes: int , partitions: int ) -> list[str]:
    """simple docstring"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!' )
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config ) -> Optional[int]:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption ( parser ) -> None:
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ) -> None:
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ) -> None:
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output ( self : Optional[Any] , want : Tuple , got : List[Any] , optionflags : Tuple ) -> bool:
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
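# Added illustration (not part of the upstream conftest): with the custom
# IGNORE_RESULT flag registered above, a docstring example can opt out of
# output comparison entirely:
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#
# CustomOutputChecker.check_output treats any output as a match whenever the
# flag is set on the example.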
| 96 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
    def __init__( self : Tuple , p_stop : float=0.0_1 , max_length : int=1000 ) -> None:
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self : Any ) -> Union[str, Any]:
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
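def _random_iterable_dataset_demo() -> None:
    # Added illustration (not part of the original tests): the stop point of
    # RandomIterableDataset depends on Python's global RNG, so reseeding
    # before each pass makes iteration reproducible -- the trick the shard
    # checks below rely on.
    random.seed(42 )
    first_pass = list(RandomIterableDataset(p_stop=0.5 , max_length=10 ) )
    random.seed(42 )
    assert first_pass == list(RandomIterableDataset(p_stop=0.5 , max_length=10 ) )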
class _lowercase ( unittest.TestCase ):
    def check_batch_sampler_shards ( self : Optional[Any] , batch_sampler : Tuple , expected : List[Any] , split_batches : bool=False , even_batches : bool=True ) -> Union[str, Any]:
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def a ( self : Tuple ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
    # num_processes batches.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # Check the shards when the dataset is not a round multiple of batch size and does not have a
    # multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
    # num_processes batches.
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    # Check the shards when the dataset is not a round multiple of batch size and does not have a
    # multiple of num_processes batches.
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
__snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
__snake_case = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[int] ) -> Tuple:
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards ( self : Optional[int] , dataset : Dict , seed : Any , batch_size : int , drop_last : bool=False , num_processes : int=2 , split_batches : bool=False ) -> List[Any]:
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
def a ( self : Dict ) -> Tuple:
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
def a ( self : Optional[Any] ) -> str:
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : str ) -> Union[str, Any]:
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ) -> str:
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Dict ) -> Optional[Any]:
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : Tuple ) -> Dict:
Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
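def _batch_sampler_shard_wraparound_demo() -> None:
    # Added illustration (not part of the original test suite) of the padding
    # behavior exercised above: with 2 processes and even_batches=True, an odd
    # number of batches is padded by wrapping around to the start of the data.
    batch_sampler = BatchSampler(range(10 ) , batch_size=2 , drop_last=False )
    shards = [BatchSamplerShard(batch_sampler , 2 , i ) for i in range(2 )]
    assert list(shards[0] ) == [[0, 1], [4, 5], [8, 9]]
    assert list(shards[1] ) == [[2, 3], [6, 7], [0, 1]]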
| 56 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__a = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
    """simple docstring"""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True # deprecated
    block_size: Optional[int] = None # deprecated
    chunksize: int = 10 << 20 # 10MB
    newlines_in_values: Optional[bool] = None
class Json( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info ( self : Dict ) -> Optional[int]:
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators ( self : List[str] , dl_manager : Any ) -> Union[str, Any]:
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
    def _cast_table ( self : Any , pa_table : pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables ( self : Dict , files : Tuple ) -> List[str]:
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
# If the file has one json object per line
else:
                with open(file , '''rb''' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('''utf-8''' )
try:
while True:
try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
                                            f'''Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
except json.JSONDecodeError:
                                logger.error(f'''Failed to read file '{file}' with error {type(e )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ): # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'''Failed to read file '{file}' with error {type(e )}: {e}''' )
                                    raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(pa_table )
break
else:
                                logger.error(f'''Failed to read file '{file}' with error {type(e )}: {e}''' )
                                raise ValueError(
                                    f'''Not able to read records in the JSON file at {file}. '''
                                    f'''You should probably indicate the field of the JSON file containing your records. '''
                                    f'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
                                    f'''Select the correct one and provide it as `field='XXX'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                            yield (file_idx, batch_idx), self._cast_table(pa_table )
                            batch_idx += 1
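# Added sketches (my illustrations, using hypothetical helper names) of the two
# mechanisms above: backfilling a declared-but-missing column before the schema
# cast, and reading a JSON-lines file in chunks that always end on a newline so
# no record straddles two batches.
def _append_missing_column_demo() -> None:
    table = pa.Table.from_pydict({'''a''': [1, 2]} )
    # a missing feature becomes an all-null column of the declared type
    table = table.append_column('''b''' , pa.array([None] * len(table ) , type=pa.string() ) )
    assert table.column_names == ['''a''', '''b''']

def _chunked_line_read_demo(path: str , chunksize: int = 10 << 20 ) -> int:
    n_batches = 0
    with open(path , '''rb''' ) as f:
        while True:
            batch = f.read(chunksize )
            if not batch:
                break
            batch += f.readline() # extend to the end of the current line
            n_batches += 1
    return n_batches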
| 97 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp ( self : int ) -> None:
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab'] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['source_spm'] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['target_spm'] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self : int , **kwargs : Optional[int] ) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self : str , tokenizer : List[str] ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def a ( self : int ) -> Optional[Any]:
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def a ( self : Dict ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '</s>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 9 )
def a ( self : List[Any] ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a ( self : Any ) -> Optional[int]:
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' )
        batch = en_de_tokenizer(['I am a small frog'] , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob('*' )]
        self.assertIn('source.spm' , contents )
        MarianTokenizer.from_pretrained(save_dir )
def a ( self : Optional[int] ) -> Any:
        tok = self.get_tokenizer()
        batch = tok(
            ['I am a small frog' * 1000, 'I am a small frog'] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
def a ( self : Tuple ) -> Dict:
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a ( self : Dict ) -> str:
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
        source_text = 'Tämä on testi'
        target_text = 'This is a test'
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(src_ids , expected_src_ids )
        target_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(target_ids , expected_target_ids )
        decoded = tokenizer.decode(target_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
| 56 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
    """simple docstring"""
    def __init__( self : List[str] , parent : Dict , batch_size : Union[str, Any]=13 , image_size : List[str]=30 , patch_size : Dict=2 , num_channels : Optional[Any]=3 , is_training : Dict=True , use_labels : str=True , hidden_size : Tuple=32 , num_hidden_layers : str=2 , num_attention_heads : int=4 , intermediate_size : List[str]=37 , hidden_act : Optional[Any]="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , type_sequence_label_size : str=10 , initializer_range : Dict=0.02 , num_labels : int=3 , mask_ratio : Any=0.6 , scope : Optional[Any]=None , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
    def prepare_config_and_inputs ( self : Dict ) -> int:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self : List[Any] ) -> List[str]:
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model ( self : Tuple , config : List[Any] , pixel_values : List[str] , labels : Optional[int] ) -> List[Any]:
        '''simple docstring'''
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining ( self : Union[str, Any] , config : Union[str, Any] , pixel_values : Any , labels : Dict ) -> Optional[Any]:
        '''simple docstring'''
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common ( self : str ) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
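def _expected_seq_length_check() -> None:
    # Added arithmetic check (my illustration) of the masking math in
    # TFViTMAEModelTester.__init__: a 30x30 image with 2x2 patches yields 225
    # patches, and mask_ratio 0.6 keeps ceil(0.4 * (225 + 1)) = 91 tokens
    # (the unmasked patches plus the [CLS] token).
    num_patches = (30 // 2) ** 2
    assert num_patches == 225
    assert int(math.ceil((1 - 0.6) * (num_patches + 1) ) ) == 91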
@require_tf
class TFViTMAEModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp ( self : Tuple ) -> None:
        '''simple docstring'''
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self : Any ) -> Tuple:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[str]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = outputs_dict[0].numpy()
_UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self : Dict ) -> int:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCAmelCase__ : Optional[Any] ):
_UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCAmelCase__ ):
_UpperCamelCase = v.numpy()
else:
_UpperCamelCase = np.array(lowerCAmelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = prepare_numpy_arrays(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = model(**lowerCAmelCase__ , noise=lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCamelCase = tf.constant(lowerCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCamelCase = tf_noise
super().check_pt_tf_models(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCAmelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCAmelCase__ , lowerCAmelCase__ ),)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCAmelCase__ , '''_keras_serializable''' , lowerCAmelCase__ )
}
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCamelCase = tf.convert_to_tensor(lowerCAmelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_UpperCamelCase = main_layer_class(lowerCAmelCase__ )
_UpperCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_UpperCamelCase = tf.keras.Model(lowerCAmelCase__ , outputs=main_layer(lowerCAmelCase__ ) )
_UpperCamelCase = model(lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(lowerCAmelCase__ , '''keras_model.h5''' )
model.save(lowerCAmelCase__ )
_UpperCamelCase = tf.keras.models.load_model(
lowerCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCAmelCase__ , tf.keras.Model )
_UpperCamelCase = model(lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCamelCase = outputs.last_hidden_state.numpy()
_UpperCamelCase = 0
else:
_UpperCamelCase = outputs.logits.numpy()
_UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
_UpperCamelCase = model_class.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCamelCase = after_outputs['''last_hidden_state'''].numpy()
_UpperCamelCase = 0
else:
_UpperCamelCase = after_outputs['''logits'''].numpy()
_UpperCamelCase = 0
_UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1e-5 )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
np.random.seed(2 )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
_UpperCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCAmelCase__ )
_UpperCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_UpperCamelCase = model_class.from_config(model.config )
_UpperCamelCase = new_model(lowerCAmelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_UpperCamelCase = new_model(lowerCAmelCase__ , noise=lowerCAmelCase__ )
self.assert_outputs_same(lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def snake_case__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
        model = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(model )
def prepare_img ( ) -> List[str]:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self : int ) -> Any:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining ( self : Optional[Any] ) -> str:
        '''simple docstring'''
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
| 98 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian (string_aa : bytes ) -> bytes:
    """simple docstring"""
    if len(string_aa ) != 3_2:
        raise ValueError('Input must be of length 32' )
    little_endian = B''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex (i : int ) -> bytes:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = B''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = B''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_1_2 != 4_4_8:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
    return bit_string
def get_block_words (bit_string : bytes ) -> Generator[list[int], None, None]:
    """simple docstring"""
    if len(bit_string ) % 5_1_2 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_1_2 ):
        block = bit_string[pos : pos + 5_1_2]
        block_words = []
        for i in range(0 , 5_1_2 , 3_2 ):
            block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
        yield block_words
def not_aa (i : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa (a : int , b : int ) -> int:
    """simple docstring"""
    return (a + b) % 2**3_2
def left_rotate_aa (i : int , shift : int ) -> int:
    """simple docstring"""
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
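# Added worked examples (my illustration) of the 32-bit helpers above:
# addition wraps modulo 2**32, and rotation carries the top bit around to bit 0.
assert sum_aa(2**3_2 - 1 , 1 ) == 0
assert left_rotate_aa(2**3_1 , 1 ) == 1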
def md5_me (message : bytes ) -> bytes:
    """simple docstring"""
    bit_string = preprocess(message )
    added_consts = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
    # Starting states
    aa = 0x6_7_4_5_2_3_0_1
    ba = 0xE_F_C_D_A_B_8_9
    ca = 0x9_8_B_A_D_C_F_E
    da = 0x1_0_3_2_5_4_7_6
    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
    # Process bit string in chunks, each with 16 32-bit words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(6_4 ):
            if i <= 1_5:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 3_1:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 1_6
            elif i <= 4_7:
                f = b ^ c ^ d
                g = (3 * i + 5) % 1_6
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 1_6
            f = (f + a + added_consts[i] + block_words[g]) % 2**3_2
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
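# Added cross-check (my illustration, not in the original file): compare the
# pure-Python digest above against hashlib on a small input; both should give
# the lowercase hex string for "hello".
def _md5_cross_check() -> None:
    import hashlib
    assert md5_me(b'hello' ) == hashlib.md5(b'hello' ).hexdigest().encode('utf-8' )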
| 56 | 0 |
from __future__ import annotations
def simple_interest (principal , daily_interest_rate , days_between_payments ):
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
lowerCAmelCase__ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
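# --- Hedged worked example (illustrative figures of my own, not taken from the code
# above): 10_000 at a 0.01% per-day rate for 30 days accrues 10_000 * 0.0001 * 30 == 30.0
# of simple interest, while compounding the same per-period rate over 30 periods yields
# 10_000 * ((1 + 0.0001) ** 30 - 1) ~= 30.04, slightly more.
principal, daily_rate, days = 10_000, 0.0001, 30
assert principal * daily_rate * days == 30.0
assert abs(principal * ((1 + daily_rate) ** days - 1) - 30.0435) < 1e-3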
| 99 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : Optional[str] = None ) -> str:
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__snake_case = quote(lowercase__ )
return hfh.hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' , revision=lowercase__ )
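# --- Hedged note (hypothetical example path): the quoting fallback above matters because
# old hub versions built the URL from the raw file path, so e.g. a space must become %20
# while the '/' separators stay intact.
assert quote('data/train file.txt' ) == 'data/train%20file.txt'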
| 56 | 0 |
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 100 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : nn.Module , SCREAMING_SNAKE_CASE_ : int ) -> str:
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
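# --- Hedged, self-contained sketch (my addition, not part of the test suite): the
# adapter above leaves the wrapped module's output unchanged at initialization because
# its second projection is zero-initialized, so training starts from the frozen model.
def _lora_zero_init_demo() -> None:
    base = nn.Linear(8, 8)
    adapter = nn.Sequential(nn.Linear(8, 2, bias=False), nn.Linear(2, 8, bias=False))
    nn.init.zeros_(adapter[1].weight)
    x = torch.randn(3, 8)
    assert torch.allclose(base(x) + adapter(x), base(x))
if is_torch_available():
    _lora_zero_init_demo()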
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_SCREAMING_SNAKE_CASE : Tuple = "bigscience/bloom-1b7"
# Constant values
_SCREAMING_SNAKE_CASE : Union[str, Any] = 2.109659552692574
_SCREAMING_SNAKE_CASE : Optional[Any] = "Hello my name is"
_SCREAMING_SNAKE_CASE : List[str] = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
_SCREAMING_SNAKE_CASE : Dict = 1_0
def a ( self : Optional[Any] ) -> List[Any]:
# Models and tokenizer
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase ( __lowercase ):
def a ( self : Union[str, Any] ) -> List[str]:
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : Optional[Any] ) -> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[Any] ) -> int:
__snake_case = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'quantization_config' ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def a ( self : Optional[Any] ) -> str:
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a ( self : Union[str, Any] ) -> int:
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : Optional[Any] ) -> Dict:
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def a ( self : List[Any] ) -> str:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Union[str, Any]:
__snake_case = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a ( self : Tuple ) -> Dict:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase ( unittest.TestCase ):
@classmethod
def a ( cls : Union[str, Any] ) -> Dict:
__snake_case = 't5-small'
__snake_case = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = 'Translate in German: Hello, my dog is cute'
def a ( self : List[Any] ) -> str:
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ) -> Optional[Any]:
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
__snake_case = modules
def a ( self : List[str] ) -> Any:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__snake_case = model.generate(**SCREAMING_SNAKE_CASE_ )
class _lowercase ( __lowercase ):
def a ( self : Dict ) -> str:
super().setUp()
# model_name
__snake_case = 'bigscience/bloom-560m'
__snake_case = 't5-small'
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='auto' )
def a ( self : int ) -> Dict:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ) -> Optional[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _lowercase ( __lowercase ):
def a ( self : str ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[Any] ) -> str:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a ( self : Optional[int] ) -> List[str]:
__snake_case = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _lowercase ( __lowercase ):
def a ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
def a ( self : Optional[int] ) -> List[Any]:
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( __lowercase ):
def a ( self : Any ) -> str:
__snake_case = 'facebook/opt-350m'
super().setUp()
def a ( self : int ) -> List[Any]:
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "gpt2-xl"
_SCREAMING_SNAKE_CASE : Optional[int] = 3.3191854854152187
| 56 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=2 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_6 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=1_0_0_0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = parent
SCREAMING_SNAKE_CASE_ : Tuple = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : Any = image_size
SCREAMING_SNAKE_CASE_ : int = patch_size
SCREAMING_SNAKE_CASE_ : int = text_seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : Union[str, Any] = coordinate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = shape_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : int = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE_ : List[Any] = text_seq_length
SCREAMING_SNAKE_CASE_ : List[Any] = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE_ : int = self.text_seq_length + self.image_seq_length
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : List[Any] = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : Dict = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : int = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : Optional[int] = t
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LayoutLMvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE_ : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE_ : str = model(pixel_values=lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(
lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
return True
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE_ : Any = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowerCAmelCase__ )
if model_class in get_values(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Dict = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in get_values(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
SCREAMING_SNAKE_CASE_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
elif model_class in [
*get_values(lowerCAmelCase__ ),
]:
SCREAMING_SNAKE_CASE_ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : int = type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ).pixel_values.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE_ : List[Any] = model(
input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
# verify the logits
SCREAMING_SNAKE_CASE_ : str = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
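# --- Hedged standalone sketch of the bbox-legalization loop in prepare_config_and_inputs
# above: boxes must satisfy x0 <= x1 and y0 <= y1, so out-of-order coordinates are
# swapped (toy values of my own, not a test fixture).
_box = [10, 8, 4, 2]  # x0, y0, x1, y1 with both pairs out of order
if _box[3] < _box[1]:
    _box[1], _box[3] = _box[3], _box[1]
if _box[2] < _box[0]:
    _box[0], _box[2] = _box[2], _box[0]
assert _box == [4, 2, 10, 8]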
| 101 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowercase ( unittest.TestCase ):
def a ( self : int ) -> List[str]:
__snake_case = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__snake_case = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6000,
'return_attention_mask': False,
'do_normalize': True,
}
__snake_case = tempfile.mkdtemp()
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
# load decoder from hub
__snake_case = 'hf-internal-testing/ngram-beam-search-decoder'
def a ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Dict:
__snake_case = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def a ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE_ )
def a ( self : int ) -> Dict:
shutil.rmtree(self.tmpdirname )
def a ( self : int ) -> Tuple:
__snake_case = self.get_tokenizer()
__snake_case = self.get_feature_extractor()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(2, 10, 16) , SCREAMING_SNAKE_CASE_ : Dict=77 ) -> Dict:
np.random.seed(SCREAMING_SNAKE_CASE_ )
return np.random.rand(*SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
else:
with get_context(SCREAMING_SNAKE_CASE_ ).Pool() as pool:
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as p:
__snake_case = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case , __snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , decoded_processor.lm_score )
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
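# --- Hedged arithmetic sketch of the offset-to-seconds conversion near the end of the
# test above (assuming a wav2vec2-style inputs_to_logits_ratio of 320 and a 16 kHz
# sampling rate): each logit frame spans 320 / 16_000 = 0.02 s, so a word with
# start_offset 71 begins at 71 * 0.02 = 1.42 s.
_time_offset = 320 / 16_000
assert _time_offset == 0.02
assert abs(71 * _time_offset - 1.42 ) < 1e-9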
| 56 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : List[Any] = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__magic_name__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
'''simple docstring'''
def power (base : int , exponent : int ) -> float:
    """simple docstring"""
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
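# --- Hedged self-contained restatement (hypothetical helper name) of the recursion
# above: multiply `base` into the running product `exponent` times.
def _power_demo(base: int, exponent: int) -> float:
    return base * _power_demo(base, exponent - 1) if exponent else 1
assert _power_demo(2, 10) == 1024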
| 56 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def snake_case ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
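# --- Hedged worked step of the Heun / modified-Euler update implemented above, for
# y' = y with y(0) = 1 and step size h = 0.1 (hand-checked numbers):
_h, _x, _y = 0.1, 0.0, 1.0
_f = lambda x, y: y
_y_predict = _y + _h * _f(_x, _y)  # Euler predictor: 1.1
_y_next = _y + (_h / 2) * (_f(_x, _y) + _f(_x + _h, _y_predict))  # trapezoidal corrector
assert abs(_y_next - 1.105) < 1e-12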
| 103 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection (function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    """simple docstring"""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 1_0**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f (x : float ) -> float:
    """simple docstring"""
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
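# --- Hedged one-step illustration of the secant update above for f(x) = x**3 - 2*x - 5
# starting from x0 = 3 and x1 = 3.5: the next iterate already moves toward the root
# near 2.0946 (numbers checked by hand).
_x0, _x1 = 3.0, 3.5
_g = lambda x: x**3 - 2 * x - 5
_x2 = _x1 - _g(_x1) / ((_g(_x1) - _g(_x0)) / (_x1 - _x0))
assert 2.0 < _x2 < 3.0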
| 56 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case__ ( self ) -> str:
return self._get_dummy_components()
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> Tuple:
if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ):
A__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
A__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case__ ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def snake_case__ ( self ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def snake_case__ ( self ) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def snake_case__ ( self ) -> Any:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def snake_case__ ( self ) -> Any:
self._test_save_load_local()
def snake_case__ ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
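# --- Hedged standalone sketch of the device-aware seeding pattern in get_dummy_inputs
# above: MPS only accepts the global CPU generator, other devices get a device-local one.
def _make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
assert isinstance(_make_generator("cpu"), torch.Generator)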
| 104 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
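# --- Hedged standalone sketch of the vocab-file round trip exercised in setUp above:
# one token per line, so a token's id is simply its line index (toy vocabulary of my own).
_vocab = ['<unk>', '<s>', '</s>', '我', '是']
_token_to_id = {tok: i for i, tok in enumerate(_vocab)}
assert [_token_to_id[t] for t in ['<s>', '我', '是']] == [1, 3, 4]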
| 56 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
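# Note (added for clarity): the fast tests above run on tiny dummy components
# from IFPipelineTesterMixin._get_dummy_components(), so no weights are
# downloaded. Real checkpoints are only loaded in the @slow suite below, e.g.:
#
#   pipe = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)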
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np')
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
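# Usage note (added): each stage in the slow tests above is bracketed by
# _start_torch_memory_measurement() and a read of
# torch.cuda.max_memory_allocated(), so the `mem_bytes` assertions bound the
# peak VRAM of that stage alone:
#
#   _start_torch_memory_measurement()
#   ...run one pipeline stage...
#   assert torch.cuda.max_memory_allocated() < 13 * 10**9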
| 105 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (Reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # emulate division that truncates toward zero for mixed-sign operands
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
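    # Illustrative checks (added; not in the original file). "(2 + 3) * 4"
    # written in postfix is ["2", "3", "+", "4", "*"]; division truncates
    # toward zero, which is what the mixed-sign adjustment above implements.
    assert evaluate_postfix(['2', '3', '+', '4', '*']) == 20
    assert evaluate_postfix(['7', '-2', '/']) == -3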
| 56 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
                 use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id)
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long, device=torch_device)
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device)
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
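# Minimal generation sketch (added for illustration; mirrors the slow test
# above, assuming the standard `openai-gpt` checkpoint and its tokenizer):
#
#   from transformers import OpenAIGPTTokenizer
#   tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
#   model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
#   inputs = tokenizer('the president is', return_tensors='pt')
#   output_ids = model.generate(**inputs, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))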
| 106 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest square of 1s via plain top-down recursion (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with memoization; dp_array caches sub-results."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows (O(cols) space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot the row we just filled in (copy, not alias, so the next
        # iteration still sees the previous row's values)
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
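    # Cross-check (added for illustration): the four variants defined above
    # should agree; names other than `..._bottom_up` were reconstructed here.
    demo_mat = [[1, 1], [1, 1]]
    assert (
        largest_square_area_in_matrix_top_down_approach(2, 2, demo_mat)
        == largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, demo_mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, demo_mat)
        == 2
    )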
| 56 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra pruning/masking parameters."""

    model_type = 'masked_bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method='topK', mask_init='constant', mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
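# Usage sketch (added for illustration): the config behaves like any other
# PretrainedConfig subclass and can be instantiated and serialized directly.
if __name__ == '__main__':
    config = MaskedBertConfig(pruning_method='topK', mask_init='constant', mask_scale=0.0)
    print(config.to_json_string())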
| 107 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
        with lz4.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
    import bz2
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
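# Read-back sketch (added; not part of the original fixtures): the parquet
# file written above can be verified with pyarrow, e.g. inside a test:
#
#   table = pq.read_table(parquet_path)
#   assert table.num_rows == 4
#   assert table.column_names == ['col_1', 'col_2', 'col_3']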
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
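# Consumer sketch (added; not part of the original file): pytest injects these
# session-scoped fixtures by function name, so a test module can simply do:
#
#   def test_csv_dataset(csv_path):
#       ds = datasets.load_dataset('csv', data_files=csv_path, split='train')
#       assert ds.num_rows == 4
#
# (`csv_path` here stands for whichever name the csv fixture is registered
# under in this conftest.)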
| 56 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
        self.parent.assertTrue(hasattr(config, 'num_encoder_blocks'))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
                 depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
                 downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
                 use_labels=True, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SegformerModel,
            'image-classification': SegformerForImageClassification,
            'image-segmentation': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("""SegFormer does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len])
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len])
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""").to(
            torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
        model = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""").to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
@slow
def lowerCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
# only resize + normalize
_UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
_UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
_UpperCAmelCase = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(lowerCamelCase )
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(500, 300)] )
_UpperCAmelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
_UpperCAmelCase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
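

# For reference, the post-processing flow exercised above maps one-to-one onto
# standalone inference. A minimal sketch (not part of the test suite; the
# helper name `segment` is illustrative, the checkpoint is the one the tests
# already assume):
from PIL import Image
import torch
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor


def segment(path: str):
    processor = SegformerImageProcessor()
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    image = Image.open(path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Rescale the logits to the input resolution and take the per-pixel argmax.
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]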
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
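

# Illustrative usage of the config above (a sketch; assumes the usual public
# re-export from the top-level transformers package):
from transformers import CamembertConfig

config = CamembertConfig(hidden_size=512, num_hidden_layers=6)
assert config.model_type == "camembert"
assert config.to_dict()["hidden_size"] == 512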
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
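

# Outside the test harness, the pipeline accepts any 1-D float waveform.
# A minimal sketch (the tiny checkpoint from the fast test above is assumed):
import numpy as np
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
# One second of noise stands in for a real recording.
waveform = np.random.default_rng(0).uniform(-1.0, 1.0, 48_000).astype(np.float32)
print(classifier(waveform, candidate_labels=["Sound of a dog", "Sound of rain"]))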
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
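

# Illustrative only: a small randomly initialized model built from this config
# (assumes TimesformerModel is importable from the top-level package):
from transformers import TimesformerConfig, TimesformerModel

config = TimesformerConfig(image_size=96, num_frames=4, num_hidden_layers=2)
model = TimesformerModel(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")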
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( lowercase , lowercase , lowercase , unittest.TestCase ):
UpperCamelCase : str = AltDiffusionPipeline
UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCAmelCase__ : List[str] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(UpperCamelCase_ )
UpperCAmelCase__ : Any = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCAmelCase__ : Tuple = 77
UpperCAmelCase__ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __snake_case ( self ):
UpperCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : Any = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = text_encoder
UpperCAmelCase__ : Optional[Any] = AltDiffusionPipeline(**UpperCamelCase_ )
UpperCAmelCase__ : Any = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = 'A photo of an astronaut'
UpperCAmelCase__ : str = alt_pipe(**UpperCamelCase_ )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Optional[int] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
UpperCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Dict = self.get_dummy_components()
UpperCAmelCase__ : Optional[int] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : List[str] = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
UpperCAmelCase__ : int = text_encoder
UpperCAmelCase__ : Optional[Any] = AltDiffusionPipeline(**UpperCamelCase_ )
UpperCAmelCase__ : List[str] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = alt_pipe(**UpperCamelCase_ )
UpperCAmelCase__ : str = output.images
UpperCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
# make sure here that pndm scheduler skips prk
UpperCAmelCase__ : Dict = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : str = alt_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
UpperCAmelCase__ : int = output.images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
UpperCAmelCase__ : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCAmelCase__ : Any = torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='numpy' )
UpperCAmelCase__ : Dict = output.images
UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ : Dict = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
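

# The slow tests above reduce to this end-user pattern (a sketch; assumes a
# CUDA device and the BAAI/AltDiffusion checkpoint):
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None).to("cuda")
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")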
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            # Swap the payloads of the two nodes.
            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
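

# Usage notes (illustrative, using the classes above): swapping with a value
# that is not in the list is a no-op, and swapping a value with itself
# returns immediately.
demo = LinkedList()
for value in (10, 20, 30):
    demo.push(value)
demo.swap_nodes(10, 99)  # 99 absent -> list unchanged
demo.swap_nodes(30, 30)  # same value -> early return
demo.print_list()        # 30 20 10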
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
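

# The _import_structure/_LazyModule pattern above defers importing heavy
# submodules until an attribute is first accessed. A self-contained sketch of
# the same idea (not the actual transformers implementation):
import importlib
import types


class LazySubmodule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # e.g. {"modeling_tapas": ["TapasModel", ...]} -> {"TapasModel": "modeling_tapas", ...}
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # Import the owning submodule lazily, then forward the lookup.
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)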
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    from .features import FeatureType


# Module-level warning flags (each is set once when a fallback decoder is used).
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False


@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, there is no need to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
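

# Typical end-user flow for the feature above (a sketch; the wav path is
# hypothetical, and Audio is assumed to be re-exported by the datasets package):
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["some_recording.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]          # decode_example() runs here
print(sample["sampling_rate"])   # 16000, after resampling if needed
print(sample["array"].shape)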
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample

        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
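

# The encode/decode round trip exercised above, as standalone usage (a sketch;
# assumes the CompVis checkpoint and a CUDA device):
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").to("cuda").eval()
image = torch.randn(1, 3, 512, 512, device="cuda")
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # -> (1, 4, 64, 64), an 8x spatial reduction
    reconstruction = vae.decode(latents).sample       # -> (1, 3, 512, 512)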
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, down)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, down)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias) so the row below stays intact while the row above is computed.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
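
    # Worked example (illustrative): all four variants agree on this 3x3 matrix,
    # whose largest all-ones square has side length 2 (top-left 2x2 block).
    example = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    for solver in (
        largest_square_area_in_matrix_top_down_approach,
        largest_square_area_in_matrix_top_down_approach_with_dp,
        largest_square_area_in_matrix_bottom_up,
        largest_square_area_in_matrix_bottom_up_space_optimization,
    ):
        assert solver(3, 3, example) == 2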
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
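

# Standalone usage corresponding to the slow test above (a sketch; assumes a
# CUDA device, and export_to_gif is assumed to be available in diffusers.utils):
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
frames = pipe(
    "a shark",
    generator=torch.Generator(device="cuda").manual_seed(0),
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=64,
).images[0]
export_to_gif(frames, "shark_3d.gif")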
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
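

# Example (illustrative): resolving a file inside a dataset repository.
# hf_hub_url("ashraq/esc50", "README.md", revision="main") returns
# "https://huggingface.co/datasets/ashraq/esc50/resolve/main/README.md".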
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
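
    # Quick sanity check (illustrative): 10 has exactly five prime partitions
    # (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5); partition() encodes each one as
    # the product of its parts, so:
    assert sorted(partition(10)) == [21, 25, 30, 32, 36]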
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
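# A short usage sketch mirroring the integration test above; the checkpoint name is
# the one the test uses, everything else is an illustrative assumption.
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

def classify_image_sketch(image_path: str) -> int:
    image = Image.open(image_path)
    processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=image, return_tensors="pt")
    logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes plus background
    return logits.argmax(-1).item()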
| 657 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
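# A minimal sketch of driving the same flow programmatically instead of through the
# `accelerate config` entry point; the config-file path is illustrative, and
# `config_command` still prompts interactively before saving.
def run_config_sketch() -> None:
    parser = config_command_parser()
    args = parser.parse_args(["--config_file", "my_config.yaml"])
    config_command(args)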
| 56 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        '''simple docstring'''
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        '''simple docstring'''
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F"""{mode}.txt""")
        guid_index = 1
        examples = []
        with open(file_path, encoding="""utf-8""") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("""-DOCSTART-""") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"""{mode}-{guid_index}""", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(""" """)
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""", """"""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""")
            if words:
                examples.append(InputExample(guid=F"""{mode}-{guid_index}""", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        '''simple docstring'''
        example_id = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + """ """ + preds_list[example_id].pop(0) + """\n"""
                writer.write(output_line)
            else:
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""", line.split()[0])
    def get_labels(self, path) -> List[str]:
        '''simple docstring'''
        if path:
            with open(path, """r""") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        '''simple docstring'''
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)
    def get_labels(self, path) -> List[str]:
        '''simple docstring'''
        if path:
            with open(path, """r""") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        '''simple docstring'''
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F"""{mode}.txt""")
        guid_index = 1
        examples = []
        with open(file_path, encoding="""utf-8""") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["""form"""])
                    labels.append(token["""upos"""])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=F"""{mode}-{guid_index}""", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        '''simple docstring'''
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = """"""
            for token in sentence:
                out += F"""{token['form']} ({token['upos']}|{s_p.pop(0)}) """
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path) -> List[str]:
        '''simple docstring'''
        if path:
            with open(path, """r""") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
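# A small usage sketch, assuming a directory holding a CoNLL-style `train.txt`;
# it reads tagged sentences with the NER task defined above.
def load_ner_examples_sketch(data_dir: str):
    task = NER()  # label is the last column; use Chunk() or POS() for other schemes
    return task.read_examples_from_file(data_dir, Split.train)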
| 588 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(f'''{solution() = }''')
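# A quick worked check of the conjecture tested above: an odd composite passes if it
# is a prime plus twice a square, e.g. 9 = 7 + 2*1**2 and 33 = 31 + 2*1**2, while
# 5777 (the Project Euler 46 answer) has no such decomposition.
def decomposes_sketch(odd_composite: int) -> bool:
    i = 1
    while 2 * i * i < odd_composite:
        if is_prime(odd_composite - 2 * i * i):
            return True
        i += 1
    return False

assert decomposes_sketch(9) and decomposes_sketch(33)
assert not decomposes_sketch(5777)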
| 56 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    '''simple docstring'''
    try:
        with open(model_file, '''rb''') as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith('''version'''):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.''')
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''')
        raise
    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda params: params.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfaa):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''')
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    flax_state_dict = flatten_dict(flax_state, sep='''.''')
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''')
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''', '''.0''')
                    .replace('''_1''', '''.1''')
                    .replace('''_2''', '''.2''')
                    .replace('''_3''', '''.3''')
                    .replace('''_4''', '''.4''')
                    .replace('''_5''', '''.5''')
                    .replace('''_6''', '''.6''')
                    .replace('''_7''', '''.7''')
                    .replace('''_8''', '''.8''')
                    .replace('''_9''', '''.9''')
                )
        flax_key = '''.'''.join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''')
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            ''' use it for predictions and inference.''')
    return pt_model
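# A tiny sketch of the layout rule applied above, assuming a 4D convolution kernel:
# Flax stores kernels as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W),
# hence the (3, 2, 0, 1) transpose; 2D dense kernels only need a plain transpose.
import numpy

flax_kernel = numpy.zeros((3, 3, 16, 32))               # (H, W, C_in, C_out)
pt_weight = numpy.transpose(flax_kernel, (3, 2, 0, 1))  # (C_out, C_in, H, W)
assert pt_weight.shape == (32, 16, 3, 3)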
| 640 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """simple docstring"""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
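# Example of the function above: a 100-byte payload over 4 partitions yields
# inclusive 1-based byte ranges, with the last partition absorbing the remainder.
assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']
assert allocation_num(10, 3) == ['1-3', '4-6', '7-10']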
| 56 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (3_2, 3_2)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 3_2, 3_2)
    @property
    def output_shape(self):
        return (3, 3_2, 3_2)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass
    def test_training(self):
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(torch_device)
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5)
        named_params = dict(model.named_parameters())
        named_params_a = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5E-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : str = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
        else:
            expected_output_slice = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 5_1_2, 5_1_2), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder='vae', torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3E-3)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[4_7, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3E-3)
@parameterized.expand(
[
# fmt: off
[1_3, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[3_7, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 6_4, 6_4))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3)
@parameterized.expand(
[
# fmt: off
[2_7, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[1_6, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 6_4, 6_4), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5E-3)
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 6_4, 6_4), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
        assert torch_all_close(sample, sample_a, atol=1E-1)
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 6_4, 6_4))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
        assert torch_all_close(sample, sample_a, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[4_7, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3E-3 if torch_device != 'mps' else 1E-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
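# A compact encode/decode round-trip sketch using the same tiny configuration as the
# fast tests above; it needs no checkpoint, so it runs anywhere. With two down blocks
# the VAE halves the spatial size once.
import torch
from diffusers import AutoencoderKL

vae_sketch = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
image = torch.randn(1, 3, 32, 32)
posterior = vae_sketch.encode(image).latent_dist
latents = posterior.sample(generator=torch.manual_seed(0))  # (1, 4, 16, 16)
reconstruction = vae_sketch.decode(latents).sample          # (1, 3, 32, 32)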
| 406 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.0_1, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
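# A small demonstration of the sharding semantics the tests above pin down (values
# derived from the even_batches examples): with 10 samples, batch size 3 and two
# processes, batches alternate between shards and the short final batch wraps around.
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(10), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
assert list(shards[0]) == [[0, 1, 2], [6, 7, 8]]
assert list(shards[1]) == [[3, 4, 5], [9, 0, 1]]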
| 56 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE : str =False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__ ( self ) -> Dict:
return 12
@property
def a__ ( self ) -> Tuple:
return 12
@property
def a__ ( self ) -> List[str]:
return 32
@property
def a__ ( self ) -> List[str]:
torch.manual_seed(0 )
A: Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def a__ ( self ) -> Optional[int]:
A: int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def a__ ( self ) -> List[str]:
torch.manual_seed(0 )
A: Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE_ )
@property
def a__ ( self ) -> Any:
torch.manual_seed(0 )
A: Dict = 12
A: Optional[int] = 12
A: Optional[Any] = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
A: Optional[Any] = TransformeraDModel(**SCREAMING_SNAKE_CASE_ )
return model
def a__ ( self ) -> str:
A: Dict = """cpu"""
A: List[Any] = self.dummy_vqvae
A: Optional[Any] = self.dummy_text_encoder
A: Dict = self.dummy_tokenizer
A: List[Any] = self.dummy_transformer
A: List[Any] = VQDiffusionScheduler(self.num_embed )
A: Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=SCREAMING_SNAKE_CASE_ )
A: List[str] = VQDiffusionPipeline(
vqvae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , transformer=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE_ , )
A: List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
A: List[Any] = """teddy bear playing in the pool"""
A: int = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
A: str = pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""np""" )
A: Tuple = output.images
A: str = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
A: List[Any] = pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , return_dict=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 )[0]
A: int = image[0, -3:, -3:, -1]
A: List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
A: int = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 135 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def a ( self : int ) -> int:
# fmt: off
__snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 56 | 0 |
"""simple docstring"""
def capitalise_each_letter(txt: str) -> list[str]:
    """Return one copy of ``txt`` per alphabetic character, with that character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
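    # Illustrative usage (added sketch): one variant per alphabetic position.
    print(capitalise_each_letter("abc"))  # ['Abc', 'aBc', 'abC']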
| 553 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char string of bits to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 hexadecimal chars in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    """Bitwise NOT of a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Left-rotate a 32-bit unsigned integer by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char hexadecimal MD5 digest of ``message``."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
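    # Illustrative check (added sketch): the MD5 digest of the empty message is a
    # well-known constant.
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"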
| 56 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
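# Illustrative usage (added sketch; the model path below is hypothetical):
#   tok = BarthezTokenizer("sentencepiece.bpe.model")
#   enc = tok("Bonjour le monde")  # a single sequence is wrapped as <s> ... </s>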
| 6 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
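if __name__ == "__main__":
    # Illustrative usage (added sketch; the repo id and file name are hypothetical):
    # builds the resolved https://huggingface.co/datasets/... URL without any network call.
    print(hf_hub_url("user/my_dataset", "data/train.csv", revision="main"))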
| 56 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
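# Note (added): ``can_save_slow_tokenizer`` is False when the fast tokenizer was
# loaded from ``tokenizer.json`` alone; ``save_vocabulary`` then raises because
# there is no sentencepiece model file to copy.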
| 312 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a low-rank adapter; only the adapter is meant to be trained."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        return self.module(x, *args, **kwargs) + self.adapter(x)
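    # Note (added): zero-initialising adapter[1] makes the adapter's initial output
    # zero, so a freshly wrapped module reproduces the frozen layer exactly until
    # training updates the adapter weights.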
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)

    def test_linear_are_abit(self):
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_abit(self):
        bnb_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_abit=True,
                device_map="auto",
                bnb_abit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fpaa_abit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fpaa(self):
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        TaForConditionalGeneration._keep_in_fpaa_modules = modules

    def test_inference_with_keep_in_fpaa(self):
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
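    # Note (added): steps 1-4 above follow the usual QLoRA-style recipe: freeze the
    # quantized base weights, keep 1-D parameters (layer norms) in fp32, wrap the
    # attention projections with trainable adapters, then check that gradients reach
    # only the adapters and not the frozen embedding.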
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 56 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # The heuristic here is the Manhattan Distance
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
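# Note (added): unlike A*, greedy best-first search orders the open list purely by
# the Manhattan heuristic stored in Node.f_cost, so the path it returns is not
# guaranteed to be the shortest one.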
| 171 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
def a ( self : Dict ) -> Union[str, Any]:
__snake_case = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a ( self : str ) -> Tuple:
__snake_case = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a ( self : List[str] ) -> List[str]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = floats_list((3, 1000) )
__snake_case = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self : Tuple ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = 'This is a test string'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
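    # Note (added): seeding numpy makes the fake (batch, frames, vocab) logits
    # deterministic, so the decoded texts and scores asserted in the tests below
    # are reproducible across runs.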
def a ( self : Any ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ )
__snake_case = decoder.decode_beams(SCREAMING_SNAKE_CASE_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def a ( self : Any ) -> Dict:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 15
__snake_case = -2_0.0
__snake_case = -4.0
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beam_width=SCREAMING_SNAKE_CASE_ , beam_prune_logp=SCREAMING_SNAKE_CASE_ , token_min_logp=SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
__snake_case = [d[0][2] for d in decoded_decoder_out]
__snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
def a ( self : Optional[Any] ) -> Tuple:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
__snake_case = self._get_dummy_logits()
__snake_case = 2.0
__snake_case = 5.0
__snake_case = -2_0.0
__snake_case = True
__snake_case = processor.batch_decode(
SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
__snake_case = decoded_processor_out.text
__snake_case = list(SCREAMING_SNAKE_CASE_ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , unk_score_offset=SCREAMING_SNAKE_CASE_ , lm_score_boundary=SCREAMING_SNAKE_CASE_ , )
with get_context('fork' ).Pool() as pool:
__snake_case = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
__snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE_ )
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
__snake_case = [d[key] for d in offsets]
return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 0 |
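# Minimal sketch of the offset-to-timestamp conversion the tests above exercise.
# The ratio (320 / 16_000) and the offset values below are illustrative
# assumptions, not real model output; real code derives them from
# model.config.inputs_to_logits_ratio and the feature extractor's sampling rate.
time_offset = 320 / 16_000  # assumed inputs_to_logits_ratio / sampling_rate
word_offsets = [{"word": "HELLO", "start_offset": 10, "end_offset": 14}]
word_time_stamps = [
    {"word": d["word"], "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset}
    for d in word_offsets
]
print(word_time_stamps)  # -> [{'word': 'HELLO', 'start_time': 0.2, 'end_time': ~0.28}]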
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SCREAMING_SNAKE_CASE_ ( __lowercase ):
"""simple docstring"""
def __init__( self :Optional[int], snake_case :str = "▁", snake_case :bool = True, snake_case :Union[str, AddedToken] = "<unk>", snake_case :Union[str, AddedToken] = "</s>", snake_case :Union[str, AddedToken] = "<pad>", ):
"""simple docstring"""
_lowercase ={
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
_lowercase =[None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
_lowercase =token_dict['token']
_lowercase =Tokenizer(Unigram())
_lowercase =normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}'), ' '),
normalizers.Lowercase(),
])
_lowercase =pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_),
pre_tokenizers.Digits(individual_digits=SCREAMING_SNAKE_CASE_),
pre_tokenizers.Punctuation(),
])
_lowercase =decoders.Metaspace(replacement=SCREAMING_SNAKE_CASE_, add_prefix_space=SCREAMING_SNAKE_CASE_)
_lowercase =TemplateProcessing(
single=f'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )
_lowercase ={
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
def UpperCamelCase__ ( self :Optional[Any], snake_case :Union[str, List[str]], snake_case :int = 8000, snake_case :bool = True, ):
"""simple docstring"""
_lowercase =trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_, special_tokens=self.special_tokens_list, show_progress=SCREAMING_SNAKE_CASE_, )
if isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
_lowercase =[files]
self._tokenizer.train(SCREAMING_SNAKE_CASE_, trainer=SCREAMING_SNAKE_CASE_)
self.add_unk_id()
def UpperCamelCase__ ( self :str, snake_case :Union[Iterator[str], Iterator[Iterator[str]]], snake_case :int = 8000, snake_case :bool = True, ):
"""simple docstring"""
_lowercase =trainers.UnigramTrainer(
vocab_size=SCREAMING_SNAKE_CASE_, special_tokens=self.special_tokens_list, show_progress=SCREAMING_SNAKE_CASE_, )
self._tokenizer.train_from_iterator(SCREAMING_SNAKE_CASE_, trainer=SCREAMING_SNAKE_CASE_)
self.add_unk_id()
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
_lowercase =json.loads(self._tokenizer.to_str())
_lowercase =self.special_tokens['unk']['id']
_lowercase =Tokenizer.from_str(json.dumps(SCREAMING_SNAKE_CASE_))
| 181 |
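# Self-contained sketch of the same Unigram training flow using the `tokenizers`
# library directly; the tiny corpus and the vocab size below are made-up
# examples, not values from the class above.
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

sketch_tokenizer = Tokenizer(Unigram())
sketch_trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
sketch_tokenizer.train_from_iterator(["this is a test", "another short test sentence"], trainer=sketch_trainer)
print(sketch_tokenizer.encode("this is a test").tokens)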
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int ) -> float:
"""simple docstring"""
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_a : Union[str, Any] = int(input("Enter the base: ").strip())
_a : Any = int(input("Enter the exponent: ").strip())
_a : List[str] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_a : List[Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 56 | 0 |
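# Equivalent iterative sketch that handles negative exponents directly, instead
# of the post-hoc 1 / result fix-up used in the driver code above.
def power_iterative(base: float, exponent: int) -> float:
    result = 1.0
    for _ in range(abs(exponent)):
        result *= base
    return 1 / result if exponent < 0 else result

assert power_iterative(2, 3) == 8
assert power_iterative(2, -3) == 0.125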
def lowerCAmelCase_ ( lowercase: int ) -> list:
'''simple docstring'''
_UpperCamelCase: List[str] = int(lowercase__ )
if n_element < 1:
        _UpperCamelCase: Any = ValueError('''n_element should be a positive number''' )
raise my_error
_UpperCamelCase: str = [1]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase: Union[str, Any] = (0, 0, 0)
_UpperCamelCase: Optional[Any] = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCAmelCase_ = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
UpperCAmelCase_ = hamming(int(n))
print('''-----------------------------------------------------''')
print(f"""The list with nth numbers is: {hamming_numbers}""")
print('''-----------------------------------------------------''')
| 271 |
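# Quick sanity check for the generator above. The name `hamming` follows the
# call site in the __main__ block; the expected values are the standard first
# ten Hamming numbers, i.e. all numbers of the form 2^i * 3^j * 5^k.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]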
'''simple docstring'''
import math
from collections.abc import Callable
def _a (lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ) -> float:
"""simple docstring"""
__snake_case = xa
__snake_case = xa
while True:
if x_n == x_na or function(lowercase__ ) == function(lowercase__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__snake_case = x_na - (
function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 1_0**-5:
return x_na
__snake_case = x_na
__snake_case = x_na
def _a (lowercase__ : float ) -> float:
"""simple docstring"""
return math.pow(lowercase__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 56 | 0 |
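# Worked check for the secant iteration above: for f(x) = x**3 - 2*x - 5 with
# starting points 3 and 3.5, the method converges to the real root ~2.0945515.
def sketch_f(x: float) -> float:
    return x ** 3 - 2 * x - 5

x_prev, x_curr = 3.0, 3.5
for _ in range(50):
    if sketch_f(x_curr) == sketch_f(x_prev):  # guard against division by zero once converged
        break
    x_prev, x_curr = x_curr, x_curr - sketch_f(x_curr) * (x_curr - x_prev) / (sketch_f(x_curr) - sketch_f(x_prev))
print(round(x_curr, 5))  # 2.09455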
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
__magic_name__ = {"target_lang": "fi", "source_lang": "en"}
__magic_name__ = ">>zh<<"
__magic_name__ = "Helsinki-NLP/"
if is_torch_available():
__magic_name__ = "pt"
elif is_tf_available():
__magic_name__ = "tf"
else:
__magic_name__ = "jax"
@require_sentencepiece
class _lowerCAmelCase ( __lowercase , unittest.TestCase ):
lowercase_ : int = MarianTokenizer
lowercase_ : str = False
lowercase_ : Union[str, Any] = True
def _a ( self ) -> int:
super().setUp()
_UpperCAmelCase = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCAmelCase = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
_UpperCAmelCase = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
_UpperCAmelCase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self , **a_ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _a ( self , a_ ) -> List[Any]:
return (
"This is a test",
"This is a test",
)
def _a ( self ) -> Optional[Any]:
_UpperCAmelCase = "</s>"
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 )
def _a ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _a ( self ) -> Optional[int]:
_UpperCAmelCase = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
_UpperCAmelCase = en_de_tokenizer(["I am a small frog"] , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] )
_UpperCAmelCase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob("*" )]
self.assertIn("source.spm" , SCREAMING_SNAKE_CASE_ )
MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> Any:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(
["I am a small frog" * 1000, "I am a small frog"] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def _a ( self ) -> Dict:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = tok(["I am a tiny frog", "I am a small frog"] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def _a ( self ) -> int:
# fmt: off
_UpperCAmelCase = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def _a ( self ) -> str:
_UpperCAmelCase = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
_UpperCAmelCase = "Tämä on testi"
_UpperCAmelCase = "This is a test"
_UpperCAmelCase = [76, 7, 2047, 2]
_UpperCAmelCase = [69, 12, 11, 940, 2]
_UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 657 |
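# Hedged usage sketch for MarianTokenizer outside the test harness; it needs
# network access plus the sentencepiece and PyTorch packages installed, so
# treat it as illustrative rather than guaranteed-runnable everywhere.
from transformers import MarianTokenizer

marian_tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
marian_batch = marian_tok(["I am a small frog"], return_tensors="pt", padding=True)
print(marian_batch.input_ids.shape)  # torch.Size([1, <seq_len>])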
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = CpmAntTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def a ( self : Optional[Any] ) -> Any:
super().setUp()
__snake_case = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def a ( self : List[Any] ) -> Dict:
__snake_case = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
__snake_case = '今天天气真好!'
__snake_case = ['今天', '天气', '真', '好', '!']
__snake_case = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = '今天天气真好!'
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
super().__init__()
lowercase = module
lowercase = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) , nn.Linear(SCREAMING_SNAKE_CASE_ , module.out_features , bias=SCREAMING_SNAKE_CASE_ ) , )
lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _a ( self , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.module(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) + self.adapter(SCREAMING_SNAKE_CASE_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase (unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
__A = "bigscience/bloom-1b7"
# Constant values
__A = 2.1_09_65_95_52_69_25_74
__A = "Hello my name is"
__A = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
__A = 10
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
def _a ( self ) -> Any:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """quantization_config""" ) )
lowercase = config.to_dict()
lowercase = config.to_diff_dict()
lowercase = config.to_json_string()
def _a ( self ) -> str:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
lowercase = self.model_fpaa.get_memory_footprint()
lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" )
lowercase = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = BitsAndBytesConfig()
lowercase = True
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" )
lowercase = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
def _a ( self ) -> str:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE_ , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _a ( self ) -> Dict:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries to cast with `float()`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            # Tries to cast with `half()`
self.model_abit.half()
# Test if we did not break anything
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" )
lowercase = self.model_fpaa.to(torch.floataa )
lowercase = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
lowercase = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
lowercase = self.model_fpaa.half()
# Check this does not throw an error
lowercase = self.model_fpaa.float()
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase (unittest.TestCase ):
@classmethod
def _a ( cls ) -> Dict:
'''simple docstring'''
lowercase = """t5-small"""
lowercase = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
lowercase = AutoTokenizer.from_pretrained(cls.model_name )
lowercase = """Translate in German: Hello, my dog is cute"""
def _a ( self ) -> str:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import TaForConditionalGeneration
lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
lowercase = None
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
lowercase = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
lowercase = model.generate(**SCREAMING_SNAKE_CASE_ )
lowercase = modules
def _a ( self ) -> Any:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
lowercase = model.generate(**SCREAMING_SNAKE_CASE_ )
# test with `flan-t5-small`
lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
lowercase = model.generate(**SCREAMING_SNAKE_CASE_ )
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> str:
'''simple docstring'''
super().setUp()
# model_name
lowercase = """bigscience/bloom-560m"""
lowercase = """t5-small"""
# Different types of model
lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
# Sequence classification model
lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
# CausalLM model
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
# Seq2seq model
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""auto""" )
def _a ( self ) -> Dict:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
def _a ( self ) -> str:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = pipeline(
"""text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ , device_map="""balanced""" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowercase = self.tokenizer(self.input_text , return_tensors="""pt""" )
# Second real batch
lowercase = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """facebook/opt-350m"""
super().setUp()
def _a ( self ) -> List[Any]:
'''simple docstring'''
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE_ ) ):
lowercase = LoRALayer(module.q_proj , rank=16 )
lowercase = LoRALayer(module.k_proj , rank=16 )
lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
lowercase = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowercase = model.forward(**SCREAMING_SNAKE_CASE_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase (__lowercase ):
__A = "gpt2-xl"
__A = 3.31_91_85_48_54_15_21_87
| 588 |
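# Hedged sketch of the 4-bit loading path the tests above exercise. It assumes
# a CUDA GPU plus the `bitsandbytes` and `accelerate` packages, so it is an
# illustration of the API rather than something runnable everywhere.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
bnb_model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m", quantization_config=bnb_config, device_map="auto"
)
bnb_tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
bnb_inputs = bnb_tok("Hello my name is", return_tensors="pt").to(bnb_model.device)
print(bnb_tok.decode(bnb_model.generate(**bnb_inputs, max_new_tokens=10)[0]))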
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _a (lowercase__ : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
__snake_case = {'+', '-', '*', '/'}
__snake_case = []
for token in postfix_notation:
if token in operations:
__snake_case , __snake_case = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
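# Worked example for the evaluator above: "2 3 + 4 *" in postfix notation is
# (2 + 3) * 4 = 20. The name `evaluate` is an assumed name for the obfuscated
# function defined above.
assert evaluate(["2", "3", "+", "4", "*"]) == 20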
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a : Dict = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
            # Maybe switch to is_torch_available in the future here so that Accelerate is a hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=None ) -> int:
'''simple docstring'''
require_version(deps[pkg], lowercase__ )
| 640 |
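# Minimal sketch of the runtime-check helper used above; `require_version`
# raises when the installed package does not satisfy the pinned range, and the
# optional second argument is a hint appended to the error message.
from transformers.utils.versions import require_version

require_version("tqdm>=4.27", "Try: pip install tqdm --upgrade")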
'''simple docstring'''
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase__ : int , lowercase__ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case = update_area_of_max_square(lowercase__ , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , col + 1 )
__snake_case = update_area_of_max_square(row + 1 , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
return sub_problem_sol
else:
return 0
__snake_case = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case = update_area_of_max_square_using_dp_array(lowercase__ , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase__ )
__snake_case = update_area_of_max_square_using_dp_array(row + 1 , lowercase__ , lowercase__ )
if mat[row][col]:
__snake_case = 1 + min([right, diagonal, down] )
__snake_case = max(largest_square_area[0] , lowercase__ )
__snake_case = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case = [0]
__snake_case = [[-1] * cols for _ in range(lowercase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase__ )
return largest_square_area[0]
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [[0] * (cols + 1) for _ in range(rows + 1 )]
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = dp_array[row][col + 1]
__snake_case = dp_array[row + 1][col + 1]
__snake_case = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(dp_array[row][col] , lowercase__ )
else:
__snake_case = 0
return largest_square_area
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : list[list[int]] ) -> int:
"""simple docstring"""
__snake_case = [0] * (cols + 1)
__snake_case = [0] * (cols + 1)
__snake_case = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case = current_row[col + 1]
__snake_case = next_row[col + 1]
__snake_case = next_row[col]
if mat[row][col] == 1:
__snake_case = 1 + min(lowercase__ , lowercase__ , lowercase__ )
__snake_case = max(current_row[col] , lowercase__ )
else:
__snake_case = 0
__snake_case = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56 | 0 |
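# Worked example for the variants above. Despite the "area" in the name, the
# functions return the side length of the largest all-ones square; for this
# 3x4 matrix the top-left 2x2 block gives the answer 2.
sketch_matrix = [
    [1, 1, 0, 1],
    [1, 1, 0, 0],
    [0, 1, 1, 1],
]
print(largest_square_area_in_matrix_bottom_up(3, 4, sketch_matrix))  # 2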
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return base * power(lowercase__ , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
__UpperCAmelCase = int(input('Enter the base: ').strip())
__UpperCAmelCase = int(input('Enter the exponent: ').strip())
__UpperCAmelCase = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
__UpperCAmelCase = 1 / result
print(F'{base} to the power of {exponent} is {result}')
| 406 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def _a () -> Union[str, Any]:
"""simple docstring"""
__snake_case = 1_0
__snake_case = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
__snake_case = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Dict ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
_a : Union[str, Any] = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt'
__snake_case = FILE_CONTENT
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case = bytes(lowercase__ , 'utf-8' )
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case = bytes(lowercase__ , 'utf-8' )
with gzip.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lza.frame.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Tuple ) -> Tuple:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import tarfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
import lzma
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case = bytes(lowercase__ , 'utf-8' )
with lzma.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case = bytes(lowercase__ , 'utf-8' )
with zstd.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml'
__snake_case = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowercase__ , 'w' ) as f:
f.write(lowercase__ )
return filename
_a : int = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_a : List[str] = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_a : Tuple = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_a : Optional[int] = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_a : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case = datasets.Dataset.from_dict(lowercase__ )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> Dict:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__snake_case = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowercase__ , 'w' , newline='' ) as f:
__snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
import bza
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowercase__ , 'rb' ) as f:
__snake_case = f.read()
with bza.open(lowercase__ , 'wb' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int ) -> int:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowercase__ , 'wb' ) as f:
__snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
__snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case = {'data': DATA_DICT_OF_LISTS}
with open(lowercase__ , 'w' ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Tuple:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict ) -> int:
"""simple docstring"""
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowercase__ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : List[Any] ) -> Dict:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Union[str, Any] , lowercase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
import gzip
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowercase__ , 'rb' ) as orig_file:
with gzip.open(lowercase__ , 'wb' ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : str , lowercase__ : str ) -> Optional[int]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str , lowercase__ : Optional[int] , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowercase__ , 'w' ) as f:
f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] ) -> List[Any]:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case = ['0', '1', '2', '3']
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowercase__ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> str:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : Any ) -> List[Any]:
"""simple docstring"""
__snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope='session' )
def _a () -> int:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowercase__ , 'w' ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> List[Any]:
"""simple docstring"""
__snake_case = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 56 | 0 |
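# Minimal pytest sketch of the session-scoped tmp_path_factory pattern the
# fixtures above rely on; the file name and contents are made-up examples.
import pytest

@pytest.fixture(scope="session")
def sketch_text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "file.txt"
    path.write_text("Text data.")
    return str(path)

def test_reads_sketch_fixture(sketch_text_path):
    with open(sketch_text_path) as f:
        assert f.read() == "Text data."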
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=2 , A=56 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=2 , A=2 , A=7 , A="gelu_new" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=4 , A="block_sparse" , A=True , A=False , A=2 , A=3 , ) -> Union[str, Any]:
A: Union[str, Any] = parent
A: Optional[Any] = batch_size
A: Tuple = seq_length
A: int = is_training
A: Optional[int] = use_attention_mask
A: Union[str, Any] = use_token_type_ids
A: Any = use_labels
A: Any = vocab_size
A: List[str] = hidden_size
A: Dict = num_hidden_layers
A: Any = num_attention_heads
A: int = intermediate_size
A: Union[str, Any] = hidden_act
A: int = hidden_dropout_prob
A: Tuple = attention_probs_dropout_prob
A: str = max_position_embeddings
A: List[str] = type_vocab_size
A: Union[str, Any] = type_sequence_label_size
A: Optional[Any] = initializer_range
A: Any = num_choices
A: Any = rescale_embeddings
A: Optional[Any] = attention_type
A: str = use_bias
A: str = block_size
A: List[str] = num_random_blocks
def a__ ( self ) -> Tuple:
A: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A: Any = None
if self.use_attention_mask:
A: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A: Dict = None
if self.use_token_type_ids:
A: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A: Tuple = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> Optional[int]:
A: str = self.prepare_config_and_inputs()
A , A , A , A: Union[str, Any] = config_and_inputs
A: Dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
A__ : Optional[Any] = False
A__ : int = False
def a__ ( self ) -> Tuple:
A: List[str] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a__ ( self ) -> Tuple:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a__ ( self ) -> Tuple:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a__ ( self ) -> Tuple:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a__ ( self ) -> Dict:
super().test_hidden_states_output()
@slow
def a__ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
A: Union[str, Any] = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def a__ ( self ) -> Tuple:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def a__ ( self ) -> Dict:
A , A: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A: Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(A , A=None , **A ):
return model(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest("""JIT Enabled""" ):
A: str = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
A: Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
def a__ ( self , A , A , A , A=1e-5 , A="outputs" , A=None ) -> Dict:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch version
        # makes an effort to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 135 |
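# Sketch of the JIT test pattern above on a toy function (assumes jax is
# installed; the real test wraps the model forward instead of `forward`):
import jax
import jax.numpy as jnp

@jax.jit
def forward(x):
    return jnp.tanh(x) @ x.T  # stand-in for a model forward pass

x = jnp.ones((2, 3))
jitted = forward(x)
with jax.disable_jit():
    eager = forward(x)

assert jitted.shape == eager.shape
assert jnp.allclose(jitted, eager, atol=1e-6)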
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "camembert"
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0522 , SCREAMING_SNAKE_CASE_ : str=768 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=512 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Any=0.0_2 , SCREAMING_SNAKE_CASE_ : Tuple=1e-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Dict="absolute" , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> int:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class _lowercase ( __lowercase ):
@property
def a ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 56 | 0 |
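# Illustrative sketch: the dynamic-axes mapping returned by the OnnxConfig
# above marks which tensor dimensions stay symbolic at export time; the
# multiple-choice task carries an extra `choice` dimension. Helper name is
# hypothetical.
from collections import OrderedDict

def dynamic_axes(task):
    if task == 'multiple-choice':
        axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
    else:
        axis = {0: 'batch', 1: 'sequence'}
    return OrderedDict([('input_ids', axis), ('attention_mask', axis)])

print(dynamic_axes('multiple-choice'))  # three symbolic dims per input
print(dynamic_axes('sequence-classification'))  # two symbolic dims per input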
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCAmelCase ( a__ ):
'''simple docstring'''
return TrainCommand(lowercase__ )
class __UpperCamelCase ( __lowercase ):
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase : ArgumentParser ) -> Any:
lowerCAmelCase :Any = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE_ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE_ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE_ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE_ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE_ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE_ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE_ , default=1e-0_8 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : str , UpperCAmelCase : Namespace ) -> Optional[int]:
lowerCAmelCase :int = logging.get_logger('transformers-cli/training' )
lowerCAmelCase :Any = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase :str = args.output
lowerCAmelCase :List[Any] = args.column_label
lowerCAmelCase :Tuple = args.column_text
lowerCAmelCase :Optional[int] = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
lowerCAmelCase :Optional[Any] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
lowerCAmelCase :Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase :Optional[Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
lowerCAmelCase :Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCAmelCase :List[Any] = args.validation_split
lowerCAmelCase :Tuple = args.train_batch_size
lowerCAmelCase :int = args.valid_batch_size
lowerCAmelCase :Any = args.learning_rate
lowerCAmelCase :Tuple = args.adam_epsilon
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
raise NotImplementedError
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 553 |
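# Illustrative sketch of the dispatch pattern used by the command above:
# each subcommand binds its factory with `set_defaults(func=...)` and the
# entry point calls `args.func(args)`. Argument names here are placeholders.
from argparse import ArgumentParser, Namespace

def train_factory(args: Namespace):
    print(f'would train {args.model} on {args.train_data}')

parser = ArgumentParser('cli')
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser('train')
train_parser.add_argument('--train_data', required=True)
train_parser.add_argument('--model', default='bert-base-uncased')
train_parser.set_defaults(func=train_factory)

args = parser.parse_args(['train', '--train_data', 'data.csv'])
args.func(args)  # dispatches to train_factory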
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : int = "timesformer"
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str]=224 , SCREAMING_SNAKE_CASE_ : List[str]=16 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : int=8 , SCREAMING_SNAKE_CASE_ : Tuple=768 , SCREAMING_SNAKE_CASE_ : int=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=3072 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE_ : Any=1e-6 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]="divided_space_time" , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = num_frames
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = qkv_bias
__snake_case = attention_type
__snake_case = drop_path_rate
| 56 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: bool = True , UpperCamelCase__: float = math.inf , UpperCamelCase__: float = -math.inf , UpperCamelCase__: float = math.inf , UpperCamelCase__: float = -math.inf , UpperCamelCase__: bool = False , UpperCamelCase__: float = 100 , UpperCamelCase__: float = 0.0_1 , UpperCamelCase__: float = 1 , ):
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = search_prob
SCREAMING_SNAKE_CASE__ = start_temperate
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = None
while not search_end:
SCREAMING_SNAKE_CASE__ = current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE__ = current_state
scores.append(lowercase__ )
iterations += 1
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # loop until we find a neighbor we can move to
SCREAMING_SNAKE_CASE__ = random.randint(0 , len(lowercase__ ) - 1 ) # picking a random neighbor
SCREAMING_SNAKE_CASE__ = neighbors.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
                SCREAMING_SNAKE_CASE__ = change * -1 # negate the change when searching for a minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE__ = picked_neighbor
else:
SCREAMING_SNAKE_CASE__ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE__ = picked_neighbor
SCREAMING_SNAKE_CASE__ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase__ ) , lowercase__ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_lowerCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowerCamelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
_lowerCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowerCamelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
    F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Dict ):
return (3 * x**2) - (6 * y)
_lowerCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowerCamelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
    'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F'''{local_min.score()}'''
)
_lowerCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowerCamelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
    'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F'''{local_min.score()}'''
)
| 6 |
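# Quick numeric sketch of the Metropolis acceptance rule used above: a
# worsening move of size delta is kept with probability e**(delta / T),
# which decays toward zero as the temperature drops.
import math

def acceptance_probability(delta, temperature):
    # delta is negative for a worsening move (after the sign flip when minimizing)
    return math.e ** (delta / temperature)

for temp in (100.0, 10.0, 1.0):
    print(temp, acceptance_probability(-5.0, temp))
# 100.0 -> ~0.951, 10.0 -> ~0.607, 1.0 -> ~0.0067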
'''simple docstring'''
from typing import Any
class _lowercase :
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
__snake_case = data
__snake_case = None
class _lowercase :
def __init__( self : List[Any] ) -> Tuple:
__snake_case = None
def a ( self : int ) -> Union[str, Any]:
__snake_case = self.head
while temp is not None:
print(temp.data , end=' ' )
__snake_case = temp.next
print()
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
__snake_case = Node(SCREAMING_SNAKE_CASE_ )
__snake_case = self.head
__snake_case = new_node
def a ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
if node_data_a == node_data_a:
return
else:
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
__snake_case = self.head
while node_a is not None and node_a.data != node_data_a:
__snake_case = node_a.next
if node_a is None or node_a is None:
return
__snake_case , __snake_case = node_a.data, node_a.data
if __name__ == "__main__":
_a : Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 0 |
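# Note on `swap_nodes` above: it exchanges the payloads of the two nodes
# rather than relinking them, so the node objects keep their positions.
# A minimal self-contained check of that behaviour:
class Node:
    def __init__(self, data):
        self.data = data

a, b = Node(1), Node(4)
ids_before = (id(a), id(b))
a.data, b.data = b.data, a.data  # the same swap swap_nodes performs
assert (id(a), id(b)) == ids_before  # same objects...
assert (a.data, b.data) == (4, 1)  # ...with exchanged payloads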
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ : str = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312 |
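# Sketch of the deferred-import idea behind `_LazyModule` in the block above
# (and in the similar import blocks below), using PEP 562 module-level
# __getattr__ in a hypothetical `lazy_mod.py`; the real class also handles
# TYPE_CHECKING and submodule registration.
_import_structure = {'json': ['dumps', 'loads']}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = __import__(module_name)  # real import happens on first access
            return getattr(module, name)
    raise AttributeError(f'module has no attribute {name!r}')

# elsewhere: `import lazy_mod` is cheap, while `lazy_mod.dumps({'a': 1})`
# triggers the actual `import json` on first use.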
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 | 0 |
A__ : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A__ : Optional[int] = [{"type": "code", "content": INSTALL_CONTENT}]
A__ : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 171 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( __lowercase , __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL
_SCREAMING_SNAKE_CASE : Union[str, Any] = "sample"
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1e-2
@property
def a ( self : List[str] ) -> Optional[int]:
__snake_case = 4
__snake_case = 3
__snake_case = (32, 32)
__snake_case = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": image}
@property
def a ( self : List[Any] ) -> List[Any]:
return (3, 32, 32)
@property
def a ( self : int ) -> int:
return (3, 32, 32)
def a ( self : Tuple ) -> Union[str, Any]:
__snake_case = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case = self.dummy_input
return init_dict, inputs_dict
def a ( self : Optional[Any] ) -> Any:
pass
def a ( self : Tuple ) -> List[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def a ( self : List[str] ) -> int:
# enable deterministic behavior for gradient checkpointing
__snake_case , __snake_case = self.prepare_init_args_and_inputs_for_common()
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
assert not model.is_gradient_checkpointing and model.training
__snake_case = model(**SCREAMING_SNAKE_CASE_ ).sample
        # Run the backward pass. For simplicity we skip a proper loss and
        # backprop on (out - labels).mean() instead.
model.zero_grad()
__snake_case = torch.randn_like(SCREAMING_SNAKE_CASE_ )
__snake_case = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case = self.model_class(**SCREAMING_SNAKE_CASE_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case = model_a(**SCREAMING_SNAKE_CASE_ ).sample
        # Run the backward pass. For simplicity we skip a proper loss and
        # backprop on (out_a - labels).mean() instead.
model_a.zero_grad()
__snake_case = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__snake_case = dict(model.named_parameters() )
__snake_case = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def a ( self : int ) -> int:
__snake_case , __snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
__snake_case = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def a ( self : Optional[int] ) -> List[str]:
__snake_case = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__snake_case = model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
if torch_device == "mps":
__snake_case = torch.manual_seed(0 )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case = image.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).sample
__snake_case = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
__snake_case = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
__snake_case = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
def a ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def a ( self : Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : int=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ).to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
return image
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]:
__snake_case = 'fp16' if fpaa else None
__snake_case = torch.floataa if fpaa else torch.floataa
__snake_case = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='vae' , torch_dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , )
model.to(SCREAMING_SNAKE_CASE_ ).eval()
return model
def a ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE_ )
return torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , sample_posterior=SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def a ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).sample
assert sample.shape == image.shape
__snake_case = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def a ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
__snake_case = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case = model.decode(SCREAMING_SNAKE_CASE_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def a ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
__snake_case = self.get_sd_vae_model()
__snake_case = self.get_sd_image(SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_generator(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__snake_case = model.encode(SCREAMING_SNAKE_CASE_ ).latent_dist
__snake_case = dist.sample(generator=SCREAMING_SNAKE_CASE_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE_ )
__snake_case = 3e-3 if torch_device != 'mps' else 1e-2
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
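# Sketch of why the tests above keep per-device expected slices: a seeded
# torch.Generator draws from a different random stream on CPU and CUDA, so
# the same seed yields different samples per backend (a CUDA device is
# needed to observe the difference).
import torch

cpu_gen = torch.Generator(device='cpu').manual_seed(0)
cpu_sample = torch.randn(4, generator=cpu_gen)

if torch.cuda.is_available():
    gpu_gen = torch.Generator(device='cuda').manual_seed(0)
    gpu_sample = torch.randn(4, generator=gpu_gen, device='cuda')
    print(torch.allclose(cpu_sample, gpu_sample.cpu()))  # typically False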
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
_SCREAMING_SNAKE_CASE : Any = ["prompt"]
_SCREAMING_SNAKE_CASE : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def a ( self : Any ) -> Optional[int]:
return 32
@property
def a ( self : List[Any] ) -> List[Any]:
return 32
@property
def a ( self : Tuple ) -> List[str]:
return self.time_input_dim * 4
@property
def a ( self : Dict ) -> Union[str, Any]:
return 8
@property
def a ( self : List[Any] ) -> Optional[Any]:
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def a ( self : Dict ) -> Any:
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
@property
def a ( self : str ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
return model
@property
def a ( self : Optional[Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
return model
def a ( self : Tuple ) -> Dict:
__snake_case = self.dummy_prior
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_renderer
__snake_case = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
__snake_case = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def a ( self : Optional[Any] ) -> str:
__snake_case = 'cpu'
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
__snake_case = output.images[0]
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : int ) -> List[str]:
        # NOTE: larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a ( self : Dict ) -> Any:
__snake_case = torch_device == 'cpu'
__snake_case = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
def a ( self : Union[str, Any] ) -> str:
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = 1
__snake_case = 2
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case = batch_size * [inputs[key]]
__snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[int] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
__snake_case = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
def lowerCAmelCase_ ( lowercase: int = 3 , lowercase: int = 7 , lowercase: int = 1_000_000 ) -> int:
'''simple docstring'''
_UpperCamelCase: int = 0
_UpperCamelCase: Optional[Any] = 1
for current_denominator in range(1 , limit + 1 ):
_UpperCamelCase: Optional[Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
_UpperCamelCase: Optional[int] = current_numerator
_UpperCamelCase: Dict = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 271 |
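# Cross-check sketch for the fraction search above (helper name is
# hypothetical): for each denominator q it takes floor(3*q/7) and steps back
# by one whenever 3*q/7 is exact, tracking the closest fraction strictly
# below 3/7. Brute force with `fractions.Fraction`:
from fractions import Fraction

def left_neighbour(numerator, denominator, limit):
    best = Fraction(0, 1)
    for q in range(1, limit + 1):
        p = numerator * q // denominator
        if q % denominator == 0:  # p/q would hit the target exactly
            p -= 1
        candidate = Fraction(p, q)
        if best < candidate:
            best = candidate
    return best

assert left_neighbour(3, 7, 8) == Fraction(2, 5)
# left_neighbour(3, 7, 1_000_000) gives 428570/999997 (Project Euler 71),
# though the Fraction-based loop takes a few seconds at that limit.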
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a : Optional[Any] = 100
_a : Dict = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def _a (lowercase__ : int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__snake_case = set()
__snake_case = 42
__snake_case = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a (lowercase__ : int = 5_0_0_0 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , lowercase__ ):
if len(partition(lowercase__ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
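# Cross-check sketch for the partition counter above: it encodes each
# multiset of primes as a unique product, so `len(partition(n))` is the
# number of prime partitions of n. A direct recursive count (hypothetical
# helper, primes hard-coded up to 97) agrees and reproduces the answer:
from functools import lru_cache

PRIME_LIST = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
              53, 59, 61, 67, 71, 73, 79, 83, 89, 97)

@lru_cache(maxsize=None)
def count_prime_partitions(n, max_prime=97):
    # ways to write n as an unordered sum of primes <= max_prime
    if n == 0:
        return 1
    return sum(
        count_prime_partitions(n - p, p)
        for p in PRIME_LIST
        if p <= n and p <= max_prime
    )

assert count_prime_partitions(10) == 5  # 7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2
print(next(n for n in range(2, 100) if count_prime_partitions(n) > 5000))  # 71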
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["DPTFeatureExtractor"]
__magic_name__ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def _a () -> Dict:
"""simple docstring"""
__snake_case = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__snake_case = get_sagemaker_input()
else:
__snake_case = get_cluster_input()
return config
def _a (lowercase__ : Union[str, Any]=None ) -> int:
"""simple docstring"""
if subparsers is not None:
__snake_case = subparsers.add_parser('config' , description=lowercase__ )
else:
__snake_case = argparse.ArgumentParser('Accelerate config command' , description=lowercase__ )
parser.add_argument(
'--config_file' , default=lowercase__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase__ )
return parser
def _a (lowercase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case = get_user_input()
if args.config_file is not None:
__snake_case = args.config_file
else:
if not os.path.isdir(lowercase__ ):
os.makedirs(lowercase__ )
__snake_case = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowercase__ )
else:
config.to_yaml_file(lowercase__ )
print(f'accelerate configuration saved at {config_file}' )
def _a () -> int:
"""simple docstring"""
__snake_case = config_command_parser()
__snake_case = parser.parse_args()
config_command(lowercase__ )
if __name__ == "__main__":
main()
| 56 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ : Any = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowercase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 588 |
'''simple docstring'''
from __future__ import annotations
import math
def _a (lowercase__ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_a : Dict = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def _a (lowercase__ : int ) -> list[int]:
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
__snake_case = []
for num in range(len(lowercase__ ) ):
__snake_case = 0
while 2 * i * i <= odd_composites[num]:
__snake_case = odd_composites[num] - 2 * i * i
if is_prime(lowercase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowercase__ ) == n:
return list_nums
return []
def _a () -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
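# Direct check sketch for the search above (Goldbach's "other" conjecture):
# an odd composite n is a counterexample when no i >= 1 gives a prime
# n - 2*i*i. Helper names are hypothetical; 5777 is the known first case.
import math

def is_prime_simple(number):
    if number < 2:
        return False
    return all(number % i for i in range(2, math.isqrt(number) + 1))

def breaks_conjecture(n):
    # n is assumed to be an odd composite
    i = 1
    while 2 * i * i < n:
        if is_prime_simple(n - 2 * i * i):
            return False
        i += 1
    return True

assert not breaks_conjecture(9)  # 9 = 7 + 2*1*1
assert breaks_conjecture(5777)  # first odd composite with no decomposition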
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[str] = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640 |
'''simple docstring'''
from __future__ import annotations
def _a (lowercase__ : int , lowercase__ : int ) -> list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!' )
__snake_case = number_of_bytes // partitions
__snake_case = []
for i in range(lowercase__ ):
__snake_case = i * bytes_per_partition + 1
__snake_case = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 | 0 |
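# Usage sketch for the partitioner above, rewritten with readable names:
# byte ranges are 1-indexed and inclusive, and the final partition absorbs
# the remainder when number_of_bytes is not evenly divisible.
def allocation_num(number_of_bytes, partitions):
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list

print(allocation_num(16, 4))  # ['1-4', '5-8', '9-12', '13-16']
print(allocation_num(100, 3))  # ['1-33', '34-66', '67-100']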
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def lowercase__ ( __snake_case : int , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
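# Each attention(idx, cnt) call emits every mapping for one transformer block: the q/k/v
# convolutional projections (conv weights plus batch-norm parameters and running statistics),
# the linear q/k/v and output projections, the two MLP layers, and both layer norms.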
def cls_token(idx):
    '''Rename the classification-token key of stage `idx` (only the last stage has one).'''
    token = []
    token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
    return token
def final():
    '''Rename keys for the final layer norm and the classification head.'''
    head = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    '''Convert an original CvT checkpoint into the HuggingFace format and save it.'''
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset') ), 'r') )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    # Resize the shortest edge to the requested input size.
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu') )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
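# A minimal, self-contained sketch of the remapping pattern used above (toy tensor and a
# single illustrative pair): every (new_name, old_name) tuple copies one tensor from the
# original checkpoint dict into the HuggingFace state dict under its new name.
def _demo_remap():
    original = {'stage0.patch_embed.proj.weight': torch.zeros(64, 3, 7, 7)}
    rename_pairs = [
        ('cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight',
         'stage0.patch_embed.proj.weight'),
    ]
    return OrderedDict((new_name, original[old_name]) for new_name, old_name in rename_pairs)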
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=384,
        type=int,
        help='Input image size.',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Path to the original CvT checkpoint file.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
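# Example invocation (hypothetical script name, checkpoint path and output directory):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth --pytorch_dump_folder_path ./cvt-w24-384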
| 406 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset with a random length: after each item, iteration stops with
    # probability `p_stop`, or after `max_length` items at the latest.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
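# A quick sketch of why reseeding matters for RandomIterableDataset: its length is random,
# so two passes only agree if random.seed is reset before each one.
#
#     random.seed(42); first = list(RandomIterableDataset())
#     random.seed(42); second = list(RandomIterableDataset())
#     assert first == second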
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should yield the expected number of batches.
            self.assertListEqual([len(shard) for shard in batch_sampler_lists], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
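    # The expected shards in the tests below follow from BatchSamplerShard's contract:
    # batches are dealt round-robin across the two shards, and with even_batches=True a
    # short shard is topped up by cycling back to the beginning of the dataset.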
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
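    # The check above verifies the IterableDatasetShard contract: all shards have equal,
    # batch-aligned lengths, and interleaving them round-robin reproduces the original
    # stream (wrapped around at the end when drop_last=False).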
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 56 | 0 |