code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="altclip_text_model"
def __init__( self , UpperCamelCase_=25_00_02 , UpperCamelCase_=10_24 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=40_96 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_14 , UpperCamelCase_=1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-05 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=7_68 , **UpperCamelCase_ , ) -> List[str]:
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : List[str] = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : Optional[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Optional[Any] = hidden_act
__lowercase : int = intermediate_size
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : int = max_position_embeddings
__lowercase : Dict = type_vocab_size
__lowercase : int = initializer_range
__lowercase : Optional[int] = initializer_factor
__lowercase : Union[str, Any] = layer_norm_eps
__lowercase : int = position_embedding_type
__lowercase : Optional[Any] = use_cache
__lowercase : Optional[Any] = project_dim
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="altclip_vision_model"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=2_24 , UpperCamelCase_=32 , UpperCamelCase_="quick_gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1.0 , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**UpperCamelCase_ )
__lowercase : Tuple = hidden_size
__lowercase : Optional[int] = intermediate_size
__lowercase : int = projection_dim
__lowercase : Tuple = num_hidden_layers
__lowercase : str = num_attention_heads
__lowercase : str = num_channels
__lowercase : int = patch_size
__lowercase : List[str] = image_size
__lowercase : Optional[int] = initializer_range
__lowercase : Union[str, Any] = initializer_factor
__lowercase : Optional[int] = attention_dropout
__lowercase : str = layer_norm_eps
__lowercase : Dict = hidden_act
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_ )
__lowercase ,__lowercase : Optional[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
__lowercase : int = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="altclip"
UpperCamelCase =True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=7_68 , UpperCamelCase_=2.6_5_9_2 , **UpperCamelCase_ ) -> Optional[int]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
__lowercase : Optional[Any] = kwargs.pop('''text_config_dict''' , UpperCamelCase_ )
__lowercase : Tuple = kwargs.pop('''vision_config_dict''' , UpperCamelCase_ )
super().__init__(**UpperCamelCase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__lowercase : Any = {}
# This is the complete result when using `text_config_dict`.
__lowercase : Any = AltCLIPTextConfig(**UpperCamelCase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__lowercase : Union[str, Any] = (
F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
F"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__lowercase : Tuple = (
F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
F"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__lowercase : Optional[Any] = {}
# This is the complete result when using `vision_config_dict`.
__lowercase : List[str] = AltCLIPVisionConfig(**UpperCamelCase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__lowercase : Any = {
str(UpperCamelCase_ ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__lowercase : Union[str, Any] = (
F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
__lowercase : Optional[int] = (
F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
F"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(UpperCamelCase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__lowercase : Optional[int] = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
__lowercase : Tuple = {}
logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
__lowercase : Union[str, Any] = AltCLIPTextConfig(**UpperCamelCase_ )
__lowercase : List[Any] = AltCLIPVisionConfig(**UpperCamelCase_ )
__lowercase : str = projection_dim
__lowercase : str = logit_scale_init_value
__lowercase : List[Any] = 1.0
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : List[Any] = copy.deepcopy(self.__dict__ )
__lowercase : Optional[Any] = self.text_config.to_dict()
__lowercase : int = self.vision_config.to_dict()
__lowercase : Optional[int] = self.__class__.model_type
return output
| 76 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Any:
_lowercase : str = 10
_lowercase : List[str] = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_lowercase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(lowerCamelCase_ ) ),
} , features=lowerCamelCase_ , )
return dataset
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return filename
# FILE_CONTENT + files
SCREAMING_SNAKE_CASE : str = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'file.txt'
_lowercase : List[str] = FILE_CONTENT
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
import bza
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_lowercase : Optional[int] = bytes(lowerCamelCase_ , 'utf-8' )
with gzip.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
_lowercase : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
with lza.frame.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase_ , 'w' ) as archive:
archive.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
import tarfile
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
import lzma
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
_lowercase : int = bytes(lowerCamelCase_ , 'utf-8' )
with lzma.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
import zipfile
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
_lowercase : Dict = bytes(lowerCamelCase_ , 'utf-8' )
with zstd.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
_lowercase : Optional[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ )
return filename
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
SCREAMING_SNAKE_CASE : Dict = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
SCREAMING_SNAKE_CASE : Optional[Any] = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
SCREAMING_SNAKE_CASE : Tuple = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
SCREAMING_SNAKE_CASE : Any = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> List[str]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase_ )
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con:
_lowercase : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : Tuple = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase_ , 'w' , newline='' ) as f:
_lowercase : str = csv.DictWriter(lowerCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
import bza
_lowercase : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase_ , 'rb' ) as f:
_lowercase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase_ , 'wb' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_lowercase : Optional[Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowerCamelCase_ , 'wb' ) as f:
_lowercase : List[str] = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ )
_lowercase : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase_ ) )] for k in DATA[0]} , schema=lowerCamelCase_ )
writer.write_table(lowerCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : List[Any] = {'data': DATA}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_lowercase : Optional[Any] = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[Any]:
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
import gzip
_lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
import gzip
_lowercase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase_ , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase_ , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.add(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Optional[int] = ['0', '1', '2', '3']
_lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : str = ['0', '1', '2', '3']
_lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : List[Any] = ['0', '1', '2', '3']
_lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
f.write(lowerCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
_lowercase : Any = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> int:
_lowercase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase_ )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( ) -> int:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase_ , 'w' ) as f:
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ) )
f.write(lowerCamelCase_ , arcname=os.path.basename(lowerCamelCase_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
_lowercase : str = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 89 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : torch.FloatTensor
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=3 , __lowerCAmelCase=("DownEncoderBlock2D",) , __lowerCAmelCase=(64,) , __lowerCAmelCase=2 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase=True , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE_ : Any = torch.nn.Convad(
__lowerCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE_ : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output_channel
SCREAMING_SNAKE_CASE_ : int = block_out_channels[i]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = i == len(__lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_ : Dict = get_down_block(
__lowerCAmelCase , num_layers=self.layers_per_block , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__lowerCAmelCase , resnet_groups=__lowerCAmelCase , attention_head_dim=__lowerCAmelCase , temb_channels=__lowerCAmelCase , )
self.down_blocks.append(__lowerCAmelCase )
# mid
SCREAMING_SNAKE_CASE_ : int = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__lowerCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowerCAmelCase , temb_channels=__lowerCAmelCase , )
# out
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__lowerCAmelCase , eps=1e-6 )
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.SiLU()
SCREAMING_SNAKE_CASE_ : Optional[int] = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE_ : str = nn.Convad(block_out_channels[-1] , __lowerCAmelCase , 3 , padding=1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = x
SCREAMING_SNAKE_CASE_ : Optional[int] = self.conv_in(__lowerCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCAmelCase ):
def custom_forward(*__lowerCAmelCase ):
return module(*__lowerCAmelCase )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCAmelCase ) , __lowerCAmelCase , use_reentrant=__lowerCAmelCase )
# middle
SCREAMING_SNAKE_CASE_ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCAmelCase , use_reentrant=__lowerCAmelCase )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCAmelCase ) , __lowerCAmelCase )
# middle
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __lowerCAmelCase )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ : Tuple = down_block(__lowerCAmelCase )
# middle
SCREAMING_SNAKE_CASE_ : Optional[int] = self.mid_block(__lowerCAmelCase )
# post-process
SCREAMING_SNAKE_CASE_ : Dict = self.conv_norm_out(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = self.conv_act(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = self.conv_out(__lowerCAmelCase )
return sample
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=3 , __lowerCAmelCase=("UpDecoderBlock2D",) , __lowerCAmelCase=(64,) , __lowerCAmelCase=2 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase="group" , ):
super().__init__()
SCREAMING_SNAKE_CASE_ : int = layers_per_block
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Convad(
__lowerCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = nn.ModuleList([] )
SCREAMING_SNAKE_CASE_ : List[Any] = in_channels if norm_type == 'spatial' else None
# mid
SCREAMING_SNAKE_CASE_ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__lowerCAmelCase , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__lowerCAmelCase , temb_channels=__lowerCAmelCase , )
# up
SCREAMING_SNAKE_CASE_ : Tuple = list(reversed(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = output_channel
SCREAMING_SNAKE_CASE_ : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE_ : List[str] = i == len(__lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_ : str = get_up_block(
__lowerCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , prev_output_channel=__lowerCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__lowerCAmelCase , resnet_groups=__lowerCAmelCase , attention_head_dim=__lowerCAmelCase , temb_channels=__lowerCAmelCase , resnet_time_scale_shift=__lowerCAmelCase , )
self.up_blocks.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SpatialNorm(block_out_channels[0] , __lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__lowerCAmelCase , eps=1e-6 )
SCREAMING_SNAKE_CASE_ : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Convad(block_out_channels[0] , __lowerCAmelCase , 3 , padding=1 )
SCREAMING_SNAKE_CASE_ : str = False
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
SCREAMING_SNAKE_CASE_ : List[Any] = z
SCREAMING_SNAKE_CASE_ : Any = self.conv_in(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__lowerCAmelCase ):
def custom_forward(*__lowerCAmelCase ):
return module(*__lowerCAmelCase )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
SCREAMING_SNAKE_CASE_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCAmelCase , __lowerCAmelCase , use_reentrant=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = sample.to(__lowerCAmelCase )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase , use_reentrant=__lowerCAmelCase )
else:
# middle
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = sample.to(__lowerCAmelCase )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : int = torch.utils.checkpoint.checkpoint(create_custom_forward(__lowerCAmelCase ) , __lowerCAmelCase , __lowerCAmelCase )
else:
# middle
SCREAMING_SNAKE_CASE_ : List[str] = self.mid_block(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = sample.to(__lowerCAmelCase )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE_ : Optional[int] = up_block(__lowerCAmelCase , __lowerCAmelCase )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE_ : Tuple = self.conv_norm_out(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = self.conv_norm_out(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = self.conv_act(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = self.conv_out(__lowerCAmelCase )
return sample
class snake_case_ ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase="random" , __lowerCAmelCase=False , __lowerCAmelCase=True ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[int] = n_e
SCREAMING_SNAKE_CASE_ : Optional[Any] = vq_embed_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = beta
SCREAMING_SNAKE_CASE_ : Tuple = legacy
SCREAMING_SNAKE_CASE_ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE_ : List[Any] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE_ : List[str] = self.used.shape[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE_ : Dict = self.re_embed
SCREAMING_SNAKE_CASE_ : Dict = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = n_e
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sane_index_shape
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inds.shape
assert len(__lowerCAmelCase ) > 1
SCREAMING_SNAKE_CASE_ : Optional[int] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE_ : List[Any] = self.used.to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE_ : int = match.argmax(-1 )
SCREAMING_SNAKE_CASE_ : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE_ : str = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE_ : Any = self.unknown_index
return new.reshape(__lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = inds.shape
assert len(__lowerCAmelCase ) > 1
SCREAMING_SNAKE_CASE_ : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.used.to(__lowerCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE_ : Any = 0 # simply set to zero
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __lowerCAmelCase )
return back.reshape(__lowerCAmelCase )
def __A ( self , __lowerCAmelCase ):
# reshape z -> (batch, height, width, channel) and flatten
SCREAMING_SNAKE_CASE_ : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE_ : Dict = torch.argmin(torch.cdist(__lowerCAmelCase , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.embedding(__lowerCAmelCase ).view(z.shape )
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Dict = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE_ : List[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE_ : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE_ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE_ : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE_ : Any = self.remap_to_used(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE_ : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __A ( self , __lowerCAmelCase , __lowerCAmelCase ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE_ : Optional[int] = self.unmap_to_all(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE_ : List[Any] = self.embedding(__lowerCAmelCase )
if shape is not None:
SCREAMING_SNAKE_CASE_ : Dict = z_q.view(__lowerCAmelCase )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE_ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case_ ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ : Tuple = parameters
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.chunk(__lowerCAmelCase , 2 , dim=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE_ : Dict = deterministic
SCREAMING_SNAKE_CASE_ : List[Any] = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE_ : List[str] = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __A ( self , __lowerCAmelCase = None ):
# make sure sample is on the same device as the parameters and has same dtype
SCREAMING_SNAKE_CASE_ : int = randn_tensor(
self.mean.shape , generator=__lowerCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE_ : Any = self.mean + self.std * sample
return x
def __A ( self , __lowerCAmelCase=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE_ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__lowerCAmelCase )
def __A ( self ):
return self.mean
| 311 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__: Dict = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: List[str] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: Union[str, Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
lowerCAmelCase__: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
UpperCAmelCase =[8, 5, 9, 7]
UpperCAmelCase =[
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCAmelCase =[
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,) -> None:
A = claim_vector
A = allocated_resources_table
A = maximum_claim_table
def UpperCamelCase__ ( self ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCamelCase__ ( self ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCamelCase__ ( self ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(lowerCamelCase_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCamelCase__ ( self ) -> dict[int, list[int]]:
return {self.__need().index(lowerCamelCase_ ): i for i in self.__need()}
def UpperCamelCase__ ( self ,**lowerCamelCase_ ) -> None:
A = self.__need()
A = self.__allocated_resources_table
A = self.__available_resources()
A = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 5_0 + """\n""" )
while need_list:
A = False
for each_need in need_list:
A = True
for index, need in enumerate(lowerCamelCase_ ):
if need > available_resources[index]:
A = False
break
if execution:
A = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
A = original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(lowerCamelCase_ )
# update available/freed resources stack
A = np.array(lowerCamelCase_ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(lowerCamelCase_ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def UpperCamelCase__ ( self ) -> Union[str, Any]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(lowerCamelCase_ ) + 1}'
+ """ """.join(f'{it:>8}' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(lowerCamelCase_ ) + 1}'
+ """ """.join(f'{it:>8}' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(lowerCamelCase_ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(lowerCamelCase_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase ={
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def _A ( _a : str ):
"""simple docstring"""
A = list(s_dict.keys() )
for key in keys:
A = r""".*/layers_(\d+)"""
A = key
if re.match(_a , _a ):
A = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , _a )
A = r"""(encoder|decoder)\/"""
if re.match(_a , _a ):
A = re.match(_a , _a ).groups()
if groups[0] == "encoder":
A = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , _a )
A = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , _a )
elif groups[0] == "decoder":
A = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , _a )
A = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , _a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A = new_key.replace(_a , _a )
print(f'{key} -> {new_key}' )
A = s_dict.pop(_a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A = s_dict[key].shape[0]
A = s_dict[key]
for idx in range(_a ):
A = expert_weihts[idx]
print(f'{key} -> {key.replace("expert/" , "nested fstring" )}' )
s_dict.pop(_a )
return s_dict
UpperCAmelCase ={
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def _A ( _a : Dict , _a : int ):
"""simple docstring"""
import regex as re
with open(_a , """r""" ) as f:
A = f.read()
A = re.findall(r"""(.*) = ([0-9.]*)""" , _a )
A = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A = float(_a ) if """.""" in value else int(_a )
A = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , _a )[0]
A = str(activation[1] )
A = num_experts
A = SwitchTransformersConfig(**_a )
return config
def _A ( _a : Union[str, Any] , _a : Dict , _a : Optional[Any]=None , _a : Dict="./" , _a : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
A = checkpoints.load_tax_checkpoint(_a )
if gin_file is not None:
A = convert_gin_to_config(_a , _a )
else:
A = SwitchTransformersConfig.from_pretrained(_a )
A = SwitchTransformersForConditionalGeneration(_a )
A = flax_params["""target"""]
A = flatten_dict(_a , sep="""/""" )
A = rename_keys(_a )
A = unflatten_dict(_a , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_a , _a )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(_a )
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
UpperCAmelCase =parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 617 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCAmelCase__ :
def __init__( self ,A__ ,A__ ,A__ ):
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
_A : Union[str, Any] = img
_A : Optional[int] = img.shape[1]
_A : List[str] = img.shape[0]
_A : Optional[Any] = dst_width
_A : str = dst_height
_A : str = self.src_w / self.dst_w
_A : Dict = self.src_h / self.dst_h
_A : Tuple = (
np.ones((self.dst_h, self.dst_w, 3) ,np.uinta ) * 255
)
def A__ ( self ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_A : Any = self.img[self.get_y(__A )][self.get_x(__A )]
def A__ ( self ,A__ ):
return int(self.ratio_x * x )
def A__ ( self ,A__ ):
return int(self.ratio_y * y )
if __name__ == "__main__":
_UpperCamelCase , _UpperCamelCase : Optional[Any] =800, 600
_UpperCamelCase : Optional[int] =imread('image_data/lena.jpg', 1)
_UpperCamelCase : Optional[int] =NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase : Any ={'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Union[str, Any] =['ViTFeatureExtractor']
_UpperCamelCase : Any =['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any =[
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int =[
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] =[
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_UpperCamelCase : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCamelCase ( cls : Any ):
lowerCAmelCase_ : Tuple = TOKEN
HfFolder.save_token(a_ )
@classmethod
def lowerCamelCase ( cls : Any ):
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase_ : List[str] = FlaxBertModel(a_ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
lowerCAmelCase_ : Any = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase_ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a_ , repo_id="test-model-flax" , push_to_hub=a_ , use_auth_token=self._token )
lowerCAmelCase_ : str = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCAmelCase_ : Any = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1e-3 , msg=f'''{key} not identical''' )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
lowerCAmelCase_ : Dict = FlaxBertModel(a_ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
lowerCAmelCase_ : List[str] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
lowerCAmelCase_ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase_ : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase_ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
a_ , repo_id="valid_org/test-model-flax-org" , push_to_hub=a_ , use_auth_token=self._token )
lowerCAmelCase_ : Dict = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
lowerCAmelCase_ : List[str] = flatten_dict(unfreeze(model.params ) )
lowerCAmelCase_ : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCAmelCase_ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(a_ , 1e-3 , msg=f'''{key} not identical''' )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
lowerCAmelCase_ : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
lowerCAmelCase_ : Any = False
return models_are_equal
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
lowerCAmelCase_ : List[Any] = FlaxBertModel(a_ )
lowerCAmelCase_ : Any = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(a_ , a_ ) )
with self.assertRaises(a_ ):
lowerCAmelCase_ : Any = FlaxBertModel.from_pretrained(a_ )
lowerCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(a_ , subfolder=a_ )
self.assertTrue(check_models_equal(a_ , a_ ) )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
lowerCAmelCase_ : List[str] = FlaxBertModel(a_ )
lowerCAmelCase_ : Any = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(a_ , a_ ) , max_shard_size="10KB" )
with self.assertRaises(a_ ):
lowerCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(a_ )
lowerCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(a_ , subfolder=a_ )
self.assertTrue(check_models_equal(a_ , a_ ) )
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = "bert"
lowerCAmelCase_ : int = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(a_ ):
lowerCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(a_ )
lowerCAmelCase_ : Any = FlaxBertModel.from_pretrained(a_ , subfolder=a_ )
self.assertIsNotNone(a_ )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Dict = "bert"
lowerCAmelCase_ : Optional[int] = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(a_ ):
lowerCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(a_ )
lowerCAmelCase_ : str = FlaxBertModel.from_pretrained(a_ , subfolder=a_ )
self.assertIsNotNone(a_ )
| 610 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 610 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCamelCase__ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return image
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
UpperCamelCase__ : Any = dct.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = val
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase__ : Optional[Any] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
UpperCamelCase__ : List[str] = state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
UpperCamelCase__ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE , requires_grad=SCREAMING_SNAKE_CASE ), v_bias) )
UpperCamelCase__ : Union[str, Any] = qkv_bias
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase__ : Tuple = 364 if '''coco''' in model_name else 224
UpperCamelCase__ : Dict = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase__ : Dict = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=SCREAMING_SNAKE_CASE ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase__ : Optional[int] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=SCREAMING_SNAKE_CASE ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase__ : Dict = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase__ : str = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCamelCase__ : Dict = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE , text_config=SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : str=False ):
"""simple docstring"""
UpperCamelCase__ : str = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCamelCase__ : int = tokenizer('''\n''' , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids[0]
UpperCamelCase__ , UpperCamelCase__ : List[Any] = get_blipa_config(SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ : Dict = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCamelCase__ , UpperCamelCase__ : int = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCamelCase__ : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , is_eval=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCamelCase__ : List[Any] = original_model.state_dict()
UpperCamelCase__ : List[str] = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase__ : Optional[Any] = state_dict.pop(SCREAMING_SNAKE_CASE )
if key.startswith('''Qformer.bert''' ):
UpperCamelCase__ : Union[str, Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCamelCase__ : str = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCamelCase__ : Union[str, Any] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCamelCase__ : Dict = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCamelCase__ : str = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCamelCase__ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCamelCase__ : List[Any] = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ : Any = hf_model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase__ : str = load_demo_image()
UpperCamelCase__ : Any = vis_processors['''eval'''](SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE )
# create processor
UpperCamelCase__ : Dict = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE )
# make sure processor creates exact same pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
original_model.to(SCREAMING_SNAKE_CASE )
hf_model.to(SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase__ : Dict = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCamelCase__ : Any = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).logits
else:
UpperCamelCase__ : List[Any] = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCamelCase__ : str = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase__ : List[Any] = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase__ : List[str] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase__ : Union[str, Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=SCREAMING_SNAKE_CASE )
else:
# cast to same type
UpperCamelCase__ : List[str] = logits.dtype
assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCamelCase__ : List[str] = ''''''
UpperCamelCase__ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = original_model.generate({'''image''': original_pixel_values} )
UpperCamelCase__ : Dict = hf_model.generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = input_ids.shape[1]
UpperCamelCase__ : int = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
__UpperCamelCase : List[str] = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 106 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = mock.Mock()
UpperCamelCase__ : Tuple = 500
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = HTTPError
UpperCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ : List[Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
UpperCamelCase__ : Any = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = mock.Mock()
UpperCamelCase__ : Any = 500
UpperCamelCase__ : str = {}
UpperCamelCase__ : Optional[Any] = HTTPError
UpperCamelCase__ : Tuple = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ : Optional[Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase__ ) as mock_head:
UpperCamelCase__ : str = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
try:
UpperCamelCase__ : Dict = tempfile.mktemp()
with open(lowerCamelCase__ , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCamelCase__ )
UpperCamelCase__ : Dict = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def UpperCAmelCase__ ( self : str ) -> int:
'''simple docstring'''
UpperCamelCase__ : Any = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class __magic_name__ ( unittest.TestCase):
A: List[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCAmelCase__ ( cls : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def UpperCAmelCase__ ( cls : Any ) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : int = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCamelCase__ : Tuple = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
UpperCamelCase__ : Union[str, Any] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ , repo_id='''test-tokenizer''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ : str = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCamelCase__ : List[str] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
UpperCamelCase__ : Optional[Any] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ : Union[str, Any] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def UpperCAmelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : List[Any] = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCamelCase__ : Any = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCamelCase__ : str = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ : List[Any] = os.path.join(lowerCamelCase__ , '''vocab.txt''' )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCamelCase__ : List[Any] = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCamelCase__ : str = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
UpperCamelCase__ : int = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" , use_fast=lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : str ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def UpperCAmelCase__ ( self : str ) -> str:
'''simple docstring'''
UpperCamelCase__ : Dict = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Any = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = Trie()
UpperCamelCase__ : Optional[Any] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ , ['''AB''', '''C'''] )
| 106 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCamelCase__ ( __A , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = MvpTokenizer
SCREAMING_SNAKE_CASE__ = MvpTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = filter_roberta_detectors
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE : Optional[int] = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
def lowerCamelCase_ ( self : Dict , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
SCREAMING_SNAKE_CASE : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Dict = tokenizer(UpperCamelCase_ , max_length=len(UpperCamelCase_ ) , padding=UpperCamelCase_ , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Test that special tokens are reset
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , UpperCamelCase_ )
self.assertIn("""attention_mask""" , UpperCamelCase_ )
self.assertNotIn("""labels""" , UpperCamelCase_ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Dict = tokenizer(text_target=UpperCamelCase_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Dict = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 10_24) )
@require_torch
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ["""A long paragraph for summarization."""]
SCREAMING_SNAKE_CASE : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : str = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE : Optional[int] = inputs["""input_ids"""]
SCREAMING_SNAKE_CASE : Optional[Any] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = """A, <mask> AllenNLP sentence."""
SCREAMING_SNAKE_CASE : int = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
UpperCamelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 379 |
import requests
from bsa import BeautifulSoup
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
a_ : Optional[Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
a_ : Optional[int] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , """html.parser""" )
a_ : Tuple = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 419 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self , a_ ):
lowerCamelCase_ : Tuple = 3
lowerCamelCase_ : List[Any] = 250
lowerCamelCase_ : Optional[int] = ids_tensor((batch_size, length) , a_ )
lowerCamelCase_ : List[str] = torch.ones((batch_size, length) , device=a_ , dtype=torch.float ) / length
return input_ids, scores
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self._get_tensors(5 )
lowerCamelCase_ : str = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : Any = self._get_tensors(10 )
self.assertTrue(criteria(a_ , a_ ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = MaxLengthCriteria(max_length=10 )
lowerCamelCase_ : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : Any = self._get_tensors(9 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : Dict = self._get_tensors(10 )
self.assertTrue(criteria(a_ , a_ ) )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCamelCase_ : str = self._get_tensors(5 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : List[Any] = self._get_tensors(9 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(a_ , a_ ) )
lowerCamelCase_ : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self._get_tensors(5 )
lowerCamelCase_ : Optional[Any] = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(a_ , a_ ) )
lowerCamelCase_ : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(a_ , a_ ) )
def _UpperCamelCase ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(a_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCamelCase_ : Tuple = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(a_ ) , 1 )
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ):
SCREAMING_SNAKE_CASE = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
SCREAMING_SNAKE_CASE = n - k
# Calculate C(n,k)
for i in range(UpperCAmelCase__ ):
result *= n - i
result //= i + 1
return result
def __lowerCamelCase (UpperCAmelCase__ : List[str] ):
return binomial_coefficient(2 * node_count , UpperCAmelCase__ ) // (node_count + 1)
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
if n < 0:
raise ValueError("factorial() not defined for negative values" )
SCREAMING_SNAKE_CASE = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
return catalan_number(UpperCAmelCase__ ) * factorial(UpperCAmelCase__ )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 403 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __A :
UpperCamelCase = 42
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""Translation""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def __call__( self :Union[str, Any] ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __A :
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = None
UpperCamelCase = field(default="""TranslationVariableLanguages""" , init=UpperCamelCase__ , repr=UpperCamelCase__ )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =sorted(set(self.languages ) ) if self.languages else None
__magic_name__ : Optional[int] =len(self.languages ) if self.languages else None
def __call__( self :List[str] ):
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def A__ ( self :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : Optional[int] =set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__magic_name__ : Any =[]
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__magic_name__ , __magic_name__ : List[str] =zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
def A__ ( self :List[Any] ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ : str = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""pixel_values"""]
def __init__( self : Union[str, Any] , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : int , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
_snake_case : Any = size if size is not None else {"""shortest_edge""": 256}
_snake_case : List[Any] = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase)
_snake_case : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_snake_case : int = get_size_dict(lowerCAmelCase)
_snake_case : Any = do_resize
_snake_case : str = size
_snake_case : Optional[Any] = resample
_snake_case : Optional[Any] = do_center_crop
_snake_case : Optional[Any] = crop_size
_snake_case : Optional[int] = do_rescale
_snake_case : Tuple = rescale_factor
_snake_case : Optional[Any] = do_normalize
_snake_case : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
_snake_case : Optional[Any] = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase)
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
_snake_case : Optional[int] = get_resize_output_image_size(lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase)
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Any , ) -> np.ndarray:
"""simple docstring"""
_snake_case : str = get_size_dict(lowerCAmelCase)
return center_crop(lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : float , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any]) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : int , lowerCAmelCase : ImageInput , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[float] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : Any , ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = do_resize if do_resize is not None else self.do_resize
_snake_case : str = size if size is not None else self.size
_snake_case : Union[str, Any] = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase)
_snake_case : Optional[int] = resample if resample is not None else self.resample
_snake_case : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_snake_case : int = get_size_dict(lowerCAmelCase)
_snake_case : Any = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_snake_case : str = image_mean if image_mean is not None else self.image_mean
_snake_case : Union[str, Any] = image_std if image_std is not None else self.image_std
_snake_case : List[Any] = make_list_of_images(lowerCAmelCase)
if not valid_images(lowerCAmelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
_snake_case : Optional[Any] = [to_numpy_array(lowerCAmelCase) for image in images]
if do_resize:
_snake_case : str = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase) for image in images]
if do_center_crop:
_snake_case : int = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase) for image in images]
if do_rescale:
_snake_case : Dict = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase) for image in images]
if do_normalize:
_snake_case : Tuple = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase) for image in images]
_snake_case : Union[str, Any] = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase) for image in images]
_snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase)
| 718 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
# initialize config
if "resnet-50" in model_name:
_snake_case : List[Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
_snake_case : Any = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
_snake_case : Union[str, Any] = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE__ , backbone_config=SCREAMING_SNAKE_CASE__ )
# set label attributes
_snake_case : List[str] = """panoptic""" in model_name
if is_panoptic:
_snake_case : Optional[int] = 250
else:
_snake_case : Optional[int] = 91
_snake_case : Optional[Any] = """huggingface/label-files"""
_snake_case : Optional[int] = """coco-detection-id2label.json"""
_snake_case : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Dict = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
_snake_case : List[str] = idalabel
_snake_case : str = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]:
# here we list all keys to be renamed (original name on the left, our name on the right)
_snake_case : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def lowercase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
_snake_case : str = state_dict.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : str = val
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str=False ) -> Union[str, Any]:
_snake_case : Optional[Any] = """"""
if is_panoptic:
_snake_case : Optional[Any] = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_snake_case : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case : List[str] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[Any] = in_proj_weight[:256, :]
_snake_case : Any = in_proj_bias[:256]
_snake_case : List[Any] = in_proj_weight[256:512, :]
_snake_case : Optional[int] = in_proj_bias[256:512]
_snake_case : int = in_proj_weight[-256:, :]
_snake_case : Union[str, Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_snake_case : Tuple = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_snake_case : Dict = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Optional[int] = in_proj_weight[:256, :]
_snake_case : Optional[int] = in_proj_bias[:256]
_snake_case : Any = in_proj_weight[256:512, :]
_snake_case : int = in_proj_bias[256:512]
_snake_case : int = in_proj_weight[-256:, :]
_snake_case : Tuple = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_snake_case : List[str] = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_snake_case : Optional[Any] = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_snake_case : int = in_proj_weight_cross_attn[:256, :]
_snake_case : Tuple = in_proj_bias_cross_attn[:256]
_snake_case : int = in_proj_weight_cross_attn[256:512, :]
_snake_case : Tuple = in_proj_bias_cross_attn[256:512]
_snake_case : Dict = in_proj_weight_cross_attn[-256:, :]
_snake_case : Union[str, Any] = in_proj_bias_cross_attn[-256:]
def lowercase ( ) -> Optional[Any]:
_snake_case : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[Any]:
_snake_case , _snake_case : Union[str, Any] = get_detr_config(SCREAMING_SNAKE_CASE__ )
# load original model from torch hub
_snake_case : Dict = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F'''Converting model {model_name}...''' )
_snake_case : List[Any] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
_snake_case : Optional[int] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE__ ):
if is_panoptic:
_snake_case : int = """detr.""" + src
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , is_panoptic=SCREAMING_SNAKE_CASE__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_snake_case : List[Any] = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_snake_case : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_snake_case : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_snake_case : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_snake_case : Union[str, Any] = state_dict.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = val
# finally, create HuggingFace model and load state dict
_snake_case : int = DetrForSegmentation(SCREAMING_SNAKE_CASE__ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# verify our conversion on an image
_snake_case : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
_snake_case : int = DetrImageProcessor(format=SCREAMING_SNAKE_CASE__ )
_snake_case : int = processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : str = encoding["""pixel_values"""]
_snake_case : Tuple = detr(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
a__ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 198 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = GPTSwaTokenizer
A__ : Dict = False
A__ : List[str] = True
A__ : Optional[Any] = False
def _a ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = GPTSwaTokenizer(_snake_case , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Any , _snake_case : Tuple ):
"""simple docstring"""
A__ = 'This is a test'
A__ = 'This is a test'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = '<s>'
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_snake_case ) , 20_00 )
def _a ( self : Any ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def _a ( self : str ):
"""simple docstring"""
A__ = GPTSwaTokenizer(_snake_case )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_snake_case , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
A__ = tokenizer.convert_ids_to_tokens(_snake_case )
# fmt: off
self.assertListEqual(
_snake_case , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def _a ( self : Tuple ):
"""simple docstring"""
A__ = GPTSwaTokenizer(_snake_case )
A__ = ['This is a test', 'I was born in 92000, and this is falsé.']
A__ = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_snake_case , _snake_case ):
self.assertListEqual(tokenizer.encode_fast(_snake_case ) , _snake_case )
# Test that decode_fast returns the input text
for text, token_ids in zip(_snake_case , _snake_case ):
self.assertEqual(tokenizer.decode_fast(_snake_case ) , _snake_case )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
A__ = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_snake_case , )
| 9 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase_ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
_lowercase =self.transformer_dir
shutil.copy(
os.path.join(lowerCAmelCase , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase ='src/transformers'
shutil.rmtree(self.transformer_dir )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> str:
'''simple docstring'''
_lowercase =comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_lowercase =comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_lowercase =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowercase =black.format_str(lowerCAmelCase , mode=lowerCAmelCase )
_lowercase =os.path.join(self.transformer_dir , 'new_code.py' )
with open(lowerCAmelCase , 'w' , newline='\n' ) as f:
f.write(lowerCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase )
with open(lowerCAmelCase , 'r' ) as f:
self.assertTrue(f.read() , lowerCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowerCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowerCAmelCase ) , )
# Copy consistency with a really long name
_lowercase ='TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , lowerCAmelCase , lowerCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowerCAmelCase , overwrite_result=re.sub('Bert' , 'TestModel' , lowerCAmelCase ) , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =check_copies.LOCALIZED_READMES['README_zh-hans.md']
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
_lowercase , _lowercase =check_copies.convert_to_localized_md(
lowerCAmelCase , lowerCAmelCase , localized_readme['format_model_list'] )
self.assertFalse(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
_lowercase , _lowercase =check_copies.convert_to_localized_md(
lowerCAmelCase , lowerCAmelCase , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCAmelCase )
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase =(
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
_lowercase , _lowercase =check_copies.convert_to_localized_md(
lowerCAmelCase , lowerCAmelCase , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 291 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case_ ) -> Tuple:
super().__init__()
_UpperCAmelCase = torchvision.models.resnetaaa(pretrained=__A )
_UpperCAmelCase = list(model.children() )[:-2]
_UpperCAmelCase = nn.Sequential(*__A )
_UpperCAmelCase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __A ( self , snake_case_ ) -> List[Any]:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
_UpperCAmelCase = self.pool(self.model(__A ) )
_UpperCAmelCase = torch.flatten(__A , start_dim=2 )
_UpperCAmelCase = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class a ( __lowercase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_UpperCAmelCase = [json.loads(__A ) for l in open(__A )]
_UpperCAmelCase = os.path.dirname(__A )
_UpperCAmelCase = tokenizer
_UpperCAmelCase = labels
_UpperCAmelCase = len(__A )
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = transforms
def __len__( self ) -> Tuple:
return len(self.data )
def __getitem__( self , snake_case_ ) -> Dict:
_UpperCAmelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=__A ) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sentence[0], sentence[1:-1], sentence[-1]
_UpperCAmelCase = sentence[: self.max_seq_length]
_UpperCAmelCase = torch.zeros(self.n_classes )
_UpperCAmelCase = 1
_UpperCAmelCase = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
_UpperCAmelCase = self.transforms(__A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __A ( self ) -> List[Any]:
_UpperCAmelCase = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def A__ ( A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = [len(row["sentence"] ) for row in batch]
_UpperCAmelCase , _UpperCAmelCase = len(a__ ), max(a__ )
_UpperCAmelCase = torch.zeros(a__ , a__ , dtype=torch.long )
_UpperCAmelCase = torch.zeros(a__ , a__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a__ , a__ ) ):
_UpperCAmelCase = input_row["sentence"]
_UpperCAmelCase = 1
_UpperCAmelCase = torch.stack([row["image"] for row in batch] )
_UpperCAmelCase = torch.stack([row["label"] for row in batch] )
_UpperCAmelCase = torch.stack([row["image_start_token"] for row in batch] )
_UpperCAmelCase = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def A__ ( ) -> Union[str, Any]:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A__ ( ) -> Optional[Any]:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 714 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A__ : Optional[Any] = ["image_processor", "tokenizer"]
A__ : List[Any] = "BlipImageProcessor"
A__ : List[str] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , snake_case_ , snake_case_ ) -> Any:
_UpperCAmelCase = False
super().__init__(snake_case_ , snake_case_ )
_UpperCAmelCase = self.image_processor
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
# add pixel_values
_UpperCAmelCase = self.image_processor(snake_case_ , return_tensors=snake_case_ )
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def __A ( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __A ( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __A ( self ) -> Any:
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 579 | 0 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =DownBlockaD # noqa F405
__lowerCamelCase : List[str] ='down'
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =ResnetDownsampleBlockaD # noqa F405
__lowerCamelCase : Tuple ='down'
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =AttnDownBlockaD # noqa F405
__lowerCamelCase : Optional[int] ='down'
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] =CrossAttnDownBlockaD # noqa F405
__lowerCamelCase : int ='down'
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =SimpleCrossAttnDownBlockaD # noqa F405
__lowerCamelCase : List[str] ='down'
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] =SkipDownBlockaD # noqa F405
__lowerCamelCase : Optional[int] ='down'
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Any =AttnSkipDownBlockaD # noqa F405
__lowerCamelCase : List[str] ='down'
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =DownEncoderBlockaD # noqa F405
__lowerCamelCase : List[str] ='down'
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Any =AttnDownEncoderBlockaD # noqa F405
__lowerCamelCase : str ='down'
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] =UNetMidBlockaD # noqa F405
__lowerCamelCase : Union[str, Any] ='mid'
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =UNetMidBlockaDCrossAttn # noqa F405
__lowerCamelCase : Dict ='mid'
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] =UNetMidBlockaDSimpleCrossAttn # noqa F405
__lowerCamelCase : Optional[Any] ='mid'
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] =UpBlockaD # noqa F405
__lowerCamelCase : Tuple ='up'
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] =ResnetUpsampleBlockaD # noqa F405
__lowerCamelCase : List[Any] ='up'
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =CrossAttnUpBlockaD # noqa F405
__lowerCamelCase : Union[str, Any] ='up'
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[Any] =SimpleCrossAttnUpBlockaD # noqa F405
__lowerCamelCase : List[str] ='up'
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase , include_encoder_hidden_states=__lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =AttnUpBlockaD # noqa F405
__lowerCamelCase : int ='up'
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =SkipUpBlockaD # noqa F405
__lowerCamelCase : Union[str, Any] ='up'
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict =AttnSkipUpBlockaD # noqa F405
__lowerCamelCase : List[str] ='up'
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : int =UpDecoderBlockaD # noqa F405
__lowerCamelCase : List[str] ='up'
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = {"""in_channels""": 32, """out_channels""": 32}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__lowercase )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Any =AttnUpDecoderBlockaD # noqa F405
__lowerCamelCase : Any ='up'
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = {"""in_channels""": 32, """out_channels""": 32}
__a = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__lowercase )
| 225 |
import random
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = num - 1
__a = 0
while s % 2 == 0:
__a = s // 2
t += 1
for _ in range(5 ):
__a = random.randrange(2 , num - 1 )
__a = pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if v != 1:
__a = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__a = i + 1
__a = (v**2) % num
return True
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if num < 2:
return False
__a = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 1024 ):
"""simple docstring"""
while True:
__a = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(_SCREAMING_SNAKE_CASE ):
return num
if __name__ == "__main__":
lowerCamelCase__ = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 225 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase__ ( __lowercase , __lowercase):
'''simple docstring'''
@register_to_config
def __init__( self :int , a :int = 7_6_8 , ) -> List[Any]:
super().__init__()
__UpperCamelCase : Any = nn.Parameter(torch.zeros(1 , a ) )
__UpperCamelCase : List[str] = nn.Parameter(torch.ones(1 , a ) )
def _lowerCamelCase ( self :Any , a :Optional[Union[str, torch.device]] = None , a :Optional[torch.dtype] = None , ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = nn.Parameter(self.mean.to(a ).to(a ) )
__UpperCamelCase : List[str] = nn.Parameter(self.std.to(a ).to(a ) )
return self
def _lowerCamelCase ( self :List[str] , a :Tuple ) -> int:
__UpperCamelCase : Optional[Any] = (embeds - self.mean) * 1.0 / self.std
return embeds
def _lowerCamelCase ( self :List[str] , a :List[Any] ) -> Tuple:
__UpperCamelCase : Dict = (embeds * self.std) + self.mean
return embeds | 94 |
import re
import string
import numpy as np
import datasets
lowercase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowercase : List[str] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
'''simple docstring'''
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _lowerCamelCase ( self :int , a :Optional[Any] , a :Dict , a :Optional[int]=None , a :int=False , a :Tuple=False , a :Optional[int]=False , ) -> Any:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__UpperCamelCase : List[Any] = np.array([re.sub(a , "" , a ) for x in predictions] )
__UpperCamelCase : Optional[Any] = np.array([re.sub(a , "" , a ) for x in references] )
else:
__UpperCamelCase : Optional[int] = np.asarray(a )
__UpperCamelCase : List[str] = np.asarray(a )
if ignore_case:
__UpperCamelCase : Optional[int] = np.char.lower(a )
__UpperCamelCase : str = np.char.lower(a )
if ignore_punctuation:
__UpperCamelCase : Tuple = string.punctuation.maketrans("" , "" , string.punctuation )
__UpperCamelCase : int = np.char.translate(a , table=a )
__UpperCamelCase : str = np.char.translate(a , table=a )
if ignore_numbers:
__UpperCamelCase : List[str] = string.digits.maketrans("" , "" , string.digits )
__UpperCamelCase : Tuple = np.char.translate(a , table=a )
__UpperCamelCase : Union[str, Any] = np.char.translate(a , table=a )
__UpperCamelCase : List[Any] = predictions == references
return {"exact_match": np.mean(a ) * 1_0_0} | 94 | 1 |
def a ( A__ : Optional[int] ) -> List[str]:
"""simple docstring"""
if isinstance(_A , _A ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(_A , _A ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
_lowercase =False
if num < 0:
_lowercase =True
_lowercase =-num
_lowercase =[]
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_A ) for e in binary )
return "0b" + "".join(str(_A ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCamelCase ( ):
lowerCAmelCase_ = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCAmelCase_ = Dataset.from_dict(_A )
return dataset
class A ( __UpperCAmelCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = get_dataset()
lowerCAmelCase_ = make_duplicate_clusters(UpperCamelCase__, 0.85 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = get_dataset()
lowerCAmelCase_ , lowerCAmelCase_ = deduplicate_dataset(UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ), 2 )
print(UpperCamelCase__ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], UpperCamelCase__ )
| 431 | 0 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple = "x" , SCREAMING_SNAKE_CASE_ : Optional[Any] = 10**-10 , SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 , ) -> complex:
"""simple docstring"""
UpperCAmelCase = symbols(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = lambdify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = lambdify(SCREAMING_SNAKE_CASE_ , diff(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase = starting_point
while True:
if diff_function(SCREAMING_SNAKE_CASE_ ) != 0:
UpperCAmelCase = prev_guess - multiplicity * func(SCREAMING_SNAKE_CASE_ ) / diff_function(
SCREAMING_SNAKE_CASE_ )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 719 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : int , a__ : Optional[Any]=3 , a__ : Tuple=32 , a__ : Optional[int]=3 , a__ : Optional[int]=10 , a__ : Optional[Any]=[8, 16, 32, 64] , a__ : List[str]=[1, 1, 2, 1] , a__ : Optional[Any]=True , a__ : Optional[Any]=True , a__ : Optional[int]="relu" , a__ : Any=3 , a__ : int=None , a__ : List[str]=["stage2", "stage3", "stage4"] , a__ : Optional[int]=[2, 3, 4] , a__ : Optional[Any]=1 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(a__ )
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
UpperCAmelCase = num_groups
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Dict ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __snake_case ( self : Optional[Any] , a__ : str , a__ : Optional[int] , a__ : Union[str, Any] ):
UpperCAmelCase = BitModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : int , a__ : List[Any] , a__ : Optional[Any] , a__ : Tuple ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = BitForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : Dict , a__ : Optional[int] , a__ : str , a__ : int ):
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase = None
UpperCAmelCase = BitBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_lowerCamelCase =(
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : List[str] ):
UpperCAmelCase = BitModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def __snake_case ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Optional[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __snake_case ( self : List[str] ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : List[Any] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def __snake_case ( self : int ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=a__ )
for name, module in model.named_modules():
if isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __snake_case ( self : int ):
def check_hidden_states_output(a__ : List[str] , a__ : Tuple , a__ : str ):
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase = layer_type
UpperCAmelCase = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(a__ , a__ , a__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __snake_case ( self : List[str] ):
pass
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __snake_case ( self : int ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self : Dict ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**a__ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
UpperCAmelCase = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(BitBackbone,) if is_torch_available() else ()
_lowerCamelCase =BitConfig
_lowerCamelCase =False
def __snake_case ( self : Dict ):
UpperCAmelCase = BitModelTester(self )
| 570 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'vocab_file': 'spiece.model'}
__lowerCAmelCase = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__lowerCAmelCase = {'bert_for_seq_generation': 5_1_2}
class lowerCamelCase_ ( A__ ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = []
__lowercase : Any = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<::::>" , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Any:
"""simple docstring"""
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
@property
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self , lowerCamelCase_ ) -> Any:
"""simple docstring"""
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.piece_to_id(lowerCamelCase_ )
def lowercase ( self , lowerCamelCase_ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.sp_model.IdToPiece(lowerCamelCase_ )
return token
def lowercase ( self , lowerCamelCase_ ) -> int:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
_UpperCamelCase = []
else:
current_sub_tokens.append(lowerCamelCase_ )
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def lowercase ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , "wb" ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
| 147 |
import sys
import turtle
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
'''simple docstring'''
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , depth - 1 )
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , depth - 1 )
triangle(lowerCamelCase_ , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , get_mid(lowerCamelCase_ , lowerCamelCase_ ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
A__ : Dict = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
A__ : Union[str, Any] = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 183 | 0 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__lowercase : Optional[int] = get_logger(__name__)
class _A :
"""simple docstring"""
UpperCamelCase_ : Any = 'dummy_data'
UpperCamelCase_ : List[str] = 'datasets'
UpperCamelCase_ : Dict = False
def __init__( self : str , A_ : str , A_ : str , A_ : Union[Version, str] , A_ : Optional[str] = None , A_ : bool = False , A_ : bool = True , A_ : Optional[List[Callable]] = None , ) -> Optional[Any]:
__snake_case = 0
__snake_case = dataset_name
__snake_case = cache_dir
__snake_case = use_local_dummy_data
__snake_case = config
# download_callbacks take a single url as input
__snake_case = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__snake_case = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__snake_case = str(UpperCAmelCase__ )
# to be downloaded
__snake_case = None
__snake_case = None
@property
def lowercase ( self : Optional[Any] ) -> Optional[int]:
if self._dummy_file is None:
__snake_case = self.download_dummy_data()
return self._dummy_file
@property
def lowercase ( self : Tuple ) -> List[str]:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def lowercase ( self : List[Any] ) -> Optional[Any]:
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def lowercase ( self : Any ) -> Optional[Any]:
__snake_case = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__snake_case = cached_path(
UpperCAmelCase__ , cache_dir=self.cache_dir , extract_compressed_file=UpperCAmelCase__ , force_extract=UpperCAmelCase__ )
return os.path.join(UpperCAmelCase__ , self.dummy_file_name )
@property
def lowercase ( self : List[str] ) -> str:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase ( self : int ) -> str:
if self._bucket_url is None:
__snake_case = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def lowercase ( self : Optional[Any] ) -> str:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def lowercase ( self : str , A_ : Optional[int] , *A_ : Any ) -> str:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__snake_case = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__snake_case = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return self.create_dummy_data_dict(UpperCAmelCase__ , UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , (list, tuple) ):
return self.create_dummy_data_list(UpperCAmelCase__ , UpperCAmelCase__ )
else:
return self.create_dummy_data_single(UpperCAmelCase__ , UpperCAmelCase__ )
def lowercase ( self : Dict , A_ : List[Any] , *A_ : Optional[int] ) -> Dict:
return self.download_and_extract(UpperCAmelCase__ )
def lowercase ( self : Any , A_ : int , A_ : Optional[Any] ) -> Union[str, Any]:
return self.download_and_extract(UpperCAmelCase__ )
def lowercase ( self : int , A_ : Optional[Any] , *A_ : Dict , **A_ : Any ) -> Optional[Any]:
return path
def lowercase ( self : List[str] ) -> List[Any]:
return {}
def lowercase ( self : List[str] , A_ : int , A_ : str ) -> Dict:
__snake_case = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
for single_url in single_urls:
download_callback(UpperCAmelCase__ )
else:
__snake_case = single_urls
download_callback(UpperCAmelCase__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__snake_case = [os.path.join(UpperCAmelCase__ , urllib.parse.quote_plus(Path(UpperCAmelCase__ ).name ) ) for x in single_urls]
else:
__snake_case = single_urls
__snake_case = os.path.join(UpperCAmelCase__ , urllib.parse.quote_plus(Path(UpperCAmelCase__ ).name ) )
__snake_case = value
# make sure that values are unique
if all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__snake_case = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase ( self : Optional[Any] , A_ : Optional[Any] , A_ : Union[str, Any] ) -> Any:
__snake_case = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__snake_case = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , UpperCAmelCase__ ) ) for url in data_url )
__snake_case = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__snake_case = [data_url[0]] * len(UpperCAmelCase__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCAmelCase__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(UpperCAmelCase__ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(UpperCAmelCase__ )
return dummy_data_list
def lowercase ( self : Dict , A_ : Optional[int] , A_ : List[str] ) -> Tuple:
for download_callback in self.download_callbacks:
download_callback(UpperCAmelCase__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__snake_case = os.path.join(UpperCAmelCase__ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(UpperCAmelCase__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase ( self : Union[str, Any] ) -> int:
pass
def lowercase ( self : str ) -> Tuple:
pass
def lowercase ( self : Dict , A_ : Union[str, Any] ) -> List[Any]:
def _iter_archive_members(A_ : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__snake_case = Path(self.dummy_file ).parent
__snake_case = path.relative_to(UpperCAmelCase__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__snake_case = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCAmelCase__ )
__snake_case = Path(UpperCAmelCase__ )
__snake_case = _iter_archive_members(UpperCAmelCase__ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(UpperCAmelCase__ ).as_posix(), file_path.open('''rb''' )
def lowercase ( self : Any , A_ : Dict ) -> List[Any]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
__snake_case = [paths]
for path in paths:
if os.path.isfile(UpperCAmelCase__ ):
if os.path.basename(UpperCAmelCase__ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCAmelCase__ ):
if os.path.basename(UpperCAmelCase__ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(UpperCAmelCase__ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) | 718 | """simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : List[str] , A_ : List[Any]=100 , A_ : Any=13 , A_ : Dict=30 , A_ : Optional[int]=2 , A_ : str=3 , A_ : Tuple=True , A_ : str=True , A_ : Union[str, Any]=32 , A_ : int=5 , A_ : List[Any]=4 , A_ : Optional[Any]=37 , A_ : Any="gelu" , A_ : List[str]=0.1 , A_ : int=0.1 , A_ : Tuple=10 , A_ : int=0.02 , A_ : Tuple=3 , ) -> Optional[int]:
__snake_case = parent
__snake_case = vocab_size
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def lowercase ( self : Optional[Any] ) -> List[str]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase ( self : Optional[int] , A_ : str , A_ : str , A_ : List[str] ) -> List[Any]:
__snake_case = FlaxBeitModel(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : List[str] , A_ : Any , A_ : Any , A_ : str ) -> str:
__snake_case = FlaxBeitForMaskedImageModeling(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase ( self : str , A_ : List[Any] , A_ : Optional[int] , A_ : List[str] ) -> str:
__snake_case = self.type_sequence_label_size
__snake_case = FlaxBeitForImageClassification(config=A_ )
__snake_case = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = FlaxBeitForImageClassification(A_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(A_ )
def lowercase ( self : Dict ) -> List[str]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowercase ( self : List[Any] ) -> None:
__snake_case = FlaxBeitModelTester(self )
__snake_case = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def lowercase ( self : Tuple ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase ( self : int ) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(A_ )
__snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def lowercase ( self : Union[str, Any] ) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(A_ , A_ )
__snake_case = model_class(A_ )
@jax.jit
def model_jitted(A_ : Union[str, Any] , **A_ : Union[str, Any] ):
return model(pixel_values=A_ , **A_ )
with self.subTest('''JIT Enabled''' ):
__snake_case = model_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__snake_case = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase ( self : str ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def lowercase ( self : str ) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def lowercase ( self : int ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
def SCREAMING_SNAKE_CASE ( ):
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_vision
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : str ) -> str:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase ( self : Union[str, Any] ) -> List[str]:
__snake_case = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
__snake_case = np.ones((1, 196) , dtype=A_ )
# forward pass
__snake_case = model(pixel_values=A_ , bool_masked_pos=A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 196, 8_192)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1E-2 ) )
@slow
def lowercase ( self : List[Any] ) -> List[str]:
__snake_case = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' )
# forward pass
__snake_case = model(**A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 1_000)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__snake_case = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def lowercase ( self : int ) -> str:
__snake_case = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' )
# forward pass
__snake_case = model(**A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 21_841)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__snake_case = 2_396
self.assertEqual(logits.argmax(-1 ).item() , A_ ) | 93 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : int , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : DDIMScheduler ) -> List[str]:
super().__init__()
self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : Optional[int] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 5_0 , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Tuple , ) -> Union[Tuple, ImagePipelineOutput]:
lowerCAmelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase__ , )
lowerCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCAmelCase__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase = {}
if accepts_eta:
lowerCAmelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCAmelCase = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
lowerCAmelCase = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# decode the image latents with the VAE
lowerCAmelCase = self.vqvae.decode(UpperCAmelCase__ ).sample
lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 133 |
'''simple docstring'''
def a_ ( lowerCamelCase : float , lowerCamelCase : float ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 | 1 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def UpperCamelCase__ ( __magic_name__ : Tuple=None ) -> int:
'''simple docstring'''
if subparsers is not None:
snake_case__ : int = subparsers.add_parser("""env""" )
else:
snake_case__ : Dict = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=__magic_name__ , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = torch.__version__
snake_case__ : List[Any] = torch.cuda.is_available()
snake_case__ : int = is_xpu_available()
snake_case__ : List[Any] = is_npu_available()
snake_case__ : List[Any] = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(__magic_name__ ):
snake_case__ : str = load_config_from_file(args.config_file ).to_dict()
snake_case__ : Union[str, Any] = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": f"{pt_version} ({pt_cuda_available})",
"""PyTorch XPU available""": str(__magic_name__ ),
"""PyTorch NPU available""": str(__magic_name__ ),
"""System RAM""": f"{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB",
}
if pt_cuda_available:
snake_case__ : List[Any] = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([f"- {prop}: {val}" for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
snake_case__ : Any = (
"""\n""".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(__magic_name__ , __magic_name__ )
else f"\t{accelerate_config}"
)
print(__magic_name__ )
snake_case__ : List[Any] = accelerate_config
return info
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = env_command_parser()
snake_case__ : List[str] = parser.parse_args()
env_command(__magic_name__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 716 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : int = tempfile.mkdtemp()
snake_case__ : Optional[int] = 8
# DPR tok
snake_case__ : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
snake_case__ : str = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
snake_case__ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Dict = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Optional[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Dict = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
snake_case__ : str = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def __UpperCamelCase ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
snake_case__ : str = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
snake_case__ : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__SCREAMING_SNAKE_CASE )
rag_tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = RagTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __SCREAMING_SNAKE_CASE )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
snake_case__ : List[Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
snake_case__ : Tuple = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
snake_case__ : Optional[Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
snake_case__ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 419 | 0 |
def lowerCamelCase__ ( lowercase = 50 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 62 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
snake_case = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
def constraint_to_multiple_of(lowercase , lowercase , lowercase=0 , lowercase=None ):
SCREAMING_SNAKE_CASE : Any = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Optional[int] = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : int = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Tuple = (output_size, output_size) if isinstance(lowercase , lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = get_image_size(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Tuple = output_height / input_height
SCREAMING_SNAKE_CASE : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE : int = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase )
SCREAMING_SNAKE_CASE : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : int = ensure_multiple_of
SCREAMING_SNAKE_CASE : Any = resample
SCREAMING_SNAKE_CASE : List[str] = do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ):
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE : Optional[Any] = get_resize_output_image_size(
UpperCAmelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCAmelCase_ , multiple=UpperCAmelCase_ , )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Dict , ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Dict , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Dict = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : List[str] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Any = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Tuple = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
def _A ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Tuple] = None ):
SCREAMING_SNAKE_CASE : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : str = []
for idx in range(len(UpperCAmelCase_ ) ):
SCREAMING_SNAKE_CASE : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : str = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 62 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=0.6 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Tuple = parent
__magic_name__ :Dict = batch_size
__magic_name__ :Any = image_size
__magic_name__ :Dict = patch_size
__magic_name__ :List[Any] = num_channels
__magic_name__ :int = is_training
__magic_name__ :Union[str, Any] = use_labels
__magic_name__ :str = hidden_size
__magic_name__ :Dict = num_hidden_layers
__magic_name__ :Any = num_attention_heads
__magic_name__ :Dict = intermediate_size
__magic_name__ :Optional[Any] = hidden_act
__magic_name__ :Optional[Any] = hidden_dropout_prob
__magic_name__ :Optional[int] = attention_probs_dropout_prob
__magic_name__ :str = type_sequence_label_size
__magic_name__ :int = initializer_range
__magic_name__ :Union[str, Any] = mask_ratio
__magic_name__ :Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ :Any = (image_size // patch_size) ** 2
__magic_name__ :Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ :int = None
if self.use_labels:
__magic_name__ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :Optional[Any] = self.get_config()
return config, pixel_values, labels
def A ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFViTMAEModel(config=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFViTMAEForPreTraining(__lowerCAmelCase )
__magic_name__ :Optional[int] = model(__lowerCAmelCase , training=__lowerCAmelCase )
# expected sequence length = num_patches
__magic_name__ :Dict = (self.image_size // self.patch_size) ** 2
__magic_name__ :str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ :Tuple = 1
__magic_name__ :int = TFViTMAEForPreTraining(__lowerCAmelCase )
__magic_name__ :Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase , training=__lowerCAmelCase )
__magic_name__ :List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) :List[Any] = config_and_inputs
__magic_name__ :int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFViTMAEModelTester(self )
__magic_name__ :int = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def A ( self ):
"""simple docstring"""
pass
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :str = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) )
def A ( self ):
"""simple docstring"""
__magic_name__ , __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ :Optional[Any] = model_class(__lowerCAmelCase )
__magic_name__ :str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ :Union[str, Any] = [*signature.parameters.keys()]
__magic_name__ :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Any = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :Any = model_class(__lowerCAmelCase )
__magic_name__ :Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Any = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Optional[Any] = copy.deepcopy(self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
__magic_name__ :List[str] = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Optional[Any] = outputs_dict[0].numpy()
__magic_name__ :str = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def A ( self ):
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__lowerCAmelCase ):
__magic_name__ :List[Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCAmelCase ):
__magic_name__ :Any = v.numpy()
else:
__magic_name__ :Optional[int] = np.array(__lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ :Dict = model_class(__lowerCAmelCase )
__magic_name__ :Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = prepare_numpy_arrays(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :Any = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
__magic_name__ :List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ :Optional[int] = tf.constant(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ :str = tf_noise
super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCAmelCase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(__lowerCAmelCase , __lowerCAmelCase ),)
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCAmelCase , '''_keras_serializable''' , __lowerCAmelCase )
}
__magic_name__ :Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ :Dict = tf.convert_to_tensor(__lowerCAmelCase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ :Tuple = main_layer_class(__lowerCAmelCase )
__magic_name__ :Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ :List[Any] = tf.keras.Model(__lowerCAmelCase , outputs=main_layer(__lowerCAmelCase ) )
__magic_name__ :int = model(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ :Optional[Any] = os.path.join(__lowerCAmelCase , '''keras_model.h5''' )
model.save(__lowerCAmelCase )
__magic_name__ :Any = tf.keras.models.load_model(
__lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCAmelCase , tf.keras.Model )
__magic_name__ :int = model(__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :str = model_class(__lowerCAmelCase )
__magic_name__ :Optional[int] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ :Optional[int] = outputs.last_hidden_state.numpy()
__magic_name__ :Union[str, Any] = 0
else:
__magic_name__ :Any = outputs.logits.numpy()
__magic_name__ :Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase , saved_model=__lowerCAmelCase )
__magic_name__ :Union[str, Any] = model_class.from_pretrained(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase , noise=__lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ :Tuple = after_outputs['''last_hidden_state'''].numpy()
__magic_name__ :Optional[int] = 0
else:
__magic_name__ :int = after_outputs['''logits'''].numpy()
__magic_name__ :Union[str, Any] = 0
__magic_name__ :Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase , 1E-5 )
def A ( self ):
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ :str = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ :Optional[int] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ :List[Any] = model_class(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase , noise=__lowerCAmelCase )
__magic_name__ :List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowerCAmelCase )
__magic_name__ :int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ :Dict = model_class.from_config(model.config )
__magic_name__ :Any = new_model(__lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ :Any = new_model(__lowerCAmelCase , noise=__lowerCAmelCase )
self.assert_outputs_same(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def A ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def A ( self ):
"""simple docstring"""
pass
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__lowerCAmelCase )
def __lowercase ( ):
"""simple docstring"""
__magic_name__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def A ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def A ( self ):
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__magic_name__ :Union[str, Any] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__magic_name__ :str = self.default_image_processor
__magic_name__ :str = prepare_img()
__magic_name__ :List[str] = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ :List[str] = ViTMAEConfig()
__magic_name__ :Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ :Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ :Tuple = model(**__lowerCAmelCase , noise=__lowerCAmelCase )
# verify the logits
__magic_name__ :Tuple = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__magic_name__ :Any = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
| 180 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 180 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=5_12 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ) -> int:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.num_choices
UpperCAmelCase = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( a_ , a_ , unittest.TestCase ):
_A : Optional[Any] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : int = False
_A : Dict = False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = NystromformerModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
UpperCAmelCase = model(snake_case__ )[0]
UpperCAmelCase = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = """the [MASK] of Belgium is Brussels"""
UpperCAmelCase = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
UpperCAmelCase = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
UpperCAmelCase = tokenizer(snake_case__ , return_tensors="""pt""" )
with torch.no_grad():
UpperCAmelCase = model(encoding.input_ids ).logits
UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , """capital""" )
| 673 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCamelCase_ ( a_ ):
_A : int = 'wav2vec2'
def __init__( self , snake_case__=32 , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=1_28 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=3_20 , snake_case__=2 , snake_case__=0.1 , snake_case__=1_00 , snake_case__=2_56 , snake_case__=2_56 , snake_case__=0.1 , snake_case__="sum" , snake_case__=False , snake_case__=False , snake_case__=2_56 , snake_case__=(5_12, 5_12, 5_12, 5_12, 15_00) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=5_12 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=3 , snake_case__=2 , snake_case__=3 , snake_case__=None , snake_case__=None , **snake_case__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_norm
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = vocab_size
UpperCAmelCase = do_stable_layer_norm
UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase = num_codevectors_per_group
UpperCAmelCase = num_codevector_groups
UpperCAmelCase = contrastive_logits_temperature
UpperCAmelCase = feat_quantizer_dropout
UpperCAmelCase = num_negatives
UpperCAmelCase = codevector_dim
UpperCAmelCase = proj_codevector_dim
UpperCAmelCase = diversity_loss_weight
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# adapter
UpperCAmelCase = add_adapter
UpperCAmelCase = adapter_kernel_size
UpperCAmelCase = adapter_stride
UpperCAmelCase = num_adapter_layers
UpperCAmelCase = output_hidden_size or hidden_size
UpperCAmelCase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = list(snake_case__ )
UpperCAmelCase = xvector_output_dim
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 673 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__a = 5_0_0_0_0
__a = 5_0_0_0
__a , __a = os.path.split(__file__)
__a = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def UpperCamelCase_ ( a_ , a_ ) ->int:
for i in range(lowercase__ ):
A =dataset[i]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
for i in range(0 , len(lowercase__ ) , lowercase__ ):
A =dataset[i : i + batch_size]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
with dataset.formatted_as(type=lowercase__ ):
for i in range(lowercase__ ):
A =dataset[i]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ , a_ ) ->Optional[Any]:
with dataset.formatted_as(type=lowercase__ ):
for i in range(0 , lowercase__ , lowercase__ ):
A =dataset[i : i + batch_size]
def UpperCamelCase_ ( ) ->Union[str, Any]:
A ={"num examples": SPEED_TEST_N_EXAMPLES}
A =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
A =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
A =datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
A =generate_example_dataset(
os.path.join(lowercase__ , "dataset.arrow" ) , lowercase__ , num_examples=lowercase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowercase__ ) )
A =func(lowercase__ , **lowercase__ )
print("shuffling dataset" )
A =dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowercase__ ) )
A =func(
lowercase__ , **lowercase__ )
with open(lowercase__ , "wb" ) as f:
f.write(json.dumps(lowercase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 719 |
from __future__ import annotations
import math
def UpperCamelCase_ ( a_ , a_ ) ->float:
A =u
for i in range(1 , a_ ):
A =temp * (u - i)
return temp
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 0 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
# Recurse if needed
if "." in tensor_name:
lowerCamelCase_ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase_ = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
lowerCamelCase_ = new_module
lowerCamelCase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." )
lowerCamelCase_ = tensor_name in module._buffers
lowerCamelCase_ = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
lowerCamelCase_ = False
lowerCamelCase_ = False
if is_buffer or not is_bitsandbytes_available():
lowerCamelCase_ = False
lowerCamelCase_ = False
else:
lowerCamelCase_ = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
lowerCamelCase_ = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
lowerCamelCase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCamelCase_ = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ ,torch.Tensor ):
lowerCamelCase_ = value.to('''cpu''' )
if value.dtype == torch.inta:
lowerCamelCase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,lowerCAmelCase__ ) and fpaa_statistics is None:
lowerCamelCase_ = new_value.T
lowerCamelCase_ = old_value.__dict__
if is_abit:
lowerCamelCase_ = bnb.nn.IntaParams(lowerCAmelCase__ ,requires_grad=lowerCAmelCase__ ,**lowerCAmelCase__ ).to(lowerCAmelCase__ )
elif is_abit:
lowerCamelCase_ = bnb.nn.Paramsabit(lowerCAmelCase__ ,requires_grad=lowerCAmelCase__ ,**lowerCAmelCase__ ).to(lowerCAmelCase__ )
lowerCamelCase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(lowerCAmelCase__ ) )
else:
if value is None:
lowerCamelCase_ = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ ,torch.Tensor ):
lowerCamelCase_ = value.to(lowerCAmelCase__ )
else:
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ ,device=lowerCAmelCase__ )
if is_buffer:
lowerCamelCase_ = new_value
else:
lowerCamelCase_ = nn.Parameter(lowerCAmelCase__ ,requires_grad=old_value.requires_grad )
lowerCamelCase_ = new_value
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=False ):
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase_ = []
current_key_name.append(lowerCAmelCase__ )
if (isinstance(lowerCAmelCase__ ,nn.Linear ) or isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(lowerCAmelCase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = module.weight.shape
else:
lowerCamelCase_ = module.in_features
lowerCamelCase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowerCamelCase_ = bnb.nn.LinearabitLt(
lowerCAmelCase__ ,lowerCAmelCase__ ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
lowerCamelCase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowerCamelCase_ = bnb.nn.Linearabit(
lowerCAmelCase__ ,lowerCAmelCase__ ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
lowerCamelCase_ = True
# Store the module class in case we need to transpose the weight later
lowerCamelCase_ = type(lowerCAmelCase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCAmelCase__ )
if len(list(module.children() ) ) > 0:
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,has_been_replaced=lowerCAmelCase__ ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
lowerCamelCase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowercase ( *lowerCAmelCase__ ,**lowerCAmelCase__ ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,lowerCAmelCase__ ,)
return replace_with_bnb_linear(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def lowercase ( *lowerCAmelCase__ ,**lowerCAmelCase__ ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,lowerCAmelCase__ ,)
return set_module_quantized_tensor_to_device(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCamelCase_ = find_tied_parameters(lowerCAmelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
lowerCamelCase_ = sum(lowerCAmelCase__ ,[] )
lowerCamelCase_ = len(lowerCAmelCase__ ) > 0
# Check if it is a base model
lowerCamelCase_ = not hasattr(lowerCAmelCase__ ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase_ = list(model.named_children() )
lowerCamelCase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase_ = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ )
lowerCamelCase_ = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ )
# remove ".weight" from the keys
lowerCamelCase_ = ['''.weight''', '''.bias''']
lowerCamelCase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase_ = name.replace(lowerCAmelCase__ ,'''''' )
filtered_module_names.append(lowerCAmelCase__ )
return filtered_module_names
| 29 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCAmelCase = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
UpperCAmelCase = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
UpperCAmelCase = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
lowercase = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
lowercase = evaluate(dataset=a_ , predictions=a_ )
return score
| 706 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
UpperCAmelCase = '''▁'''
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case , snake_case="</s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case=100 , snake_case=None , snake_case = None , snake_case=True , **snake_case , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowercase = [F'''<extra_id_{i}>''' for i in range(snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase = len(set(filter(lambda snake_case : bool('extra_id' in str(snake_case ) ) , snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
lowercase = legacy
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case , **snake_case , )
lowercase = vocab_file
lowercase = extra_ids
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case , snake_case ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case )) + [1]
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ):
return list(
set(filter(lambda snake_case : bool(re.search(r'<extra_id_\d+>' , snake_case ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ):
return [self._convert_token_to_id(snake_case ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = self._add_eos_if_not_present(snake_case )
if token_ids_a is None:
return token_ids_a
else:
lowercase = self._add_eos_if_not_present(snake_case )
return token_ids_a + token_ids_a
def __getstate__( self ):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , snake_case ):
lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
lowercase = SPIECE_UNDERLINE + text.replace(snake_case , ' ' )
return super().tokenize(snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ):
if not self.legacy:
lowercase = text.startswith(snake_case )
if is_first:
lowercase = text[1:]
lowercase = self.sp_model.encode(snake_case , out_type=snake_case )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case ):
lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if token.startswith('<extra_id_' ):
lowercase = re.match(r'<extra_id_(\d+)>' , snake_case )
lowercase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if index < self.sp_model.get_piece_size():
lowercase = self.sp_model.IdToPiece(snake_case )
else:
lowercase = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = []
lowercase = ''
lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
lowercase = True
lowercase = []
else:
current_sub_tokens.append(snake_case )
lowercase = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 565 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__magic_name__ : Dict ={
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__magic_name__ , __magic_name__ : List[Any] =input_paths_and_base_extractors[compression_format]
if input_path is None:
__magic_name__ : Optional[Any] =F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase )
assert base_extractor.is_extractable(lowerCamelCase )
__magic_name__ : List[str] =tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(lowerCamelCase , lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__magic_name__ : Any =file_path.read_text(encoding="""utf-8""" )
else:
__magic_name__ : Any =output_path.read_text(encoding="""utf-8""" )
__magic_name__ : Tuple =text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__magic_name__ : List[str] ={
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__magic_name__ : int =input_paths[compression_format]
if input_path is None:
__magic_name__ : List[Any] =F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase )
__magic_name__ : Optional[int] =Extractor.infer_extractor_format(lowerCamelCase )
assert extractor_format is not None
__magic_name__ : List[str] =tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__magic_name__ : List[str] =file_path.read_text(encoding="""utf-8""" )
else:
__magic_name__ : Optional[int] =output_path.read_text(encoding="""utf-8""" )
__magic_name__ : str =text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
import tarfile
__magic_name__ : Union[str, Any] =tmp_path / """data_dot_dot"""
directory.mkdir()
__magic_name__ : List[str] =directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(lowerCamelCase , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def lowerCAmelCase_ ( lowerCamelCase ):
import tarfile
__magic_name__ : Any =tmp_path / """data_sym_link"""
directory.mkdir()
__magic_name__ : Dict =directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=lowerCamelCase )
with tarfile.TarFile(lowerCamelCase , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : int ={
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__magic_name__ : Optional[int] =insecure_tar_files[insecure_tar_file]
__magic_name__ : str =tmp_path / """extracted"""
TarExtractor.extract(lowerCamelCase , lowerCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCAmelCase_ ( lowerCamelCase ):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__magic_name__ : List[str] =tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__magic_name__ : Any =(
B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(lowerCamelCase )
assert zipfile.is_zipfile(str(lowerCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase ) # but we're right
| 21 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowercase_ = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
lowercase_ = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
lowercase_ = '|'.join(sys.argv[1:])
lowercase_ = re.compile(rf"""^({joined_dirs}).*?\.py$""")
lowercase_ = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 562 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _lowercase ( a ):
_UpperCamelCase = """marian"""
_UpperCamelCase = ["""past_key_values"""]
_UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , _UpperCAmelCase=58_101 , _UpperCAmelCase=None , _UpperCAmelCase=1_024 , _UpperCAmelCase=12 , _UpperCAmelCase=4_096 , _UpperCAmelCase=16 , _UpperCAmelCase=12 , _UpperCAmelCase=4_096 , _UpperCAmelCase=16 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=1_024 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=58_100 , _UpperCAmelCase=False , _UpperCAmelCase=58_100 , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
A : Dict = vocab_size
A : Optional[int] = decoder_vocab_size or vocab_size
A : int = max_position_embeddings
A : Union[str, Any] = d_model
A : Dict = encoder_ffn_dim
A : Dict = encoder_layers
A : Tuple = encoder_attention_heads
A : int = decoder_ffn_dim
A : List[Any] = decoder_layers
A : Union[str, Any] = decoder_attention_heads
A : Optional[int] = dropout
A : Optional[Any] = attention_dropout
A : int = activation_dropout
A : Dict = activation_function
A : Union[str, Any] = init_std
A : List[str] = encoder_layerdrop
A : Optional[Any] = decoder_layerdrop
A : List[str] = use_cache
A : Tuple = encoder_layers
A : int = scale_embedding # scale factor will be sqrt(d_model) if True
A : Tuple = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
class _lowercase ( a ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
A : Dict = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
A : str = {0: '''batch'''}
A : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
A : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
A : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A : Union[str, Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
A, A : Tuple = self.num_layers
for i in range(_UpperCAmelCase ):
A : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
A : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
A : Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
A : Union[str, Any] = super().outputs
else:
A : Union[str, Any] = super(_UpperCAmelCase , self ).outputs
if self.use_past:
A, A : Tuple = self.num_layers
for i in range(_UpperCAmelCase ):
A : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
A : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
A : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Generate decoder inputs
A : Any = seq_length if not self.use_past else 1
A : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A : Optional[int] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A : Dict = dict(**_UpperCAmelCase , **_UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Any = common_inputs['''input_ids'''].shape
A : List[Any] = common_inputs['''decoder_input_ids'''].shape[1]
A, A : Union[str, Any] = self.num_attention_heads
A : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A : int = decoder_seq_length + 3
A : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A : Dict = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase )] , dim=1 )
A : Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A : Tuple = self.num_layers
A : Tuple = min(_UpperCAmelCase , _UpperCAmelCase )
A : Tuple = max(_UpperCAmelCase , _UpperCAmelCase ) - min_num_layers
A : List[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
torch.zeros(_UpperCAmelCase ),
) )
# TODO: test this.
A : Union[str, Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_UpperCAmelCase , _UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) )
return common_inputs
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
A : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : int = seqlen + 2
A, A : str = self.num_layers
A, A : List[Any] = self.num_attention_heads
A : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A : str = common_inputs['''attention_mask'''].dtype
A : Optional[int] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
A : List[Any] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(_UpperCAmelCase )
]
return common_inputs
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A : int = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A : List[str] = tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
A : List[Any] = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A : Union[str, Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
A : str = dict(tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) )
return common_inputs
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
A : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
else:
A : Dict = self._generate_dummy_inputs_for_causal_lm(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
return common_inputs
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
A : List[Any] = super()._flatten_past_key_values_(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
A : Tuple = super(_UpperCAmelCase , self )._flatten_past_key_values_(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@property
def snake_case ( self ):
return 1E-4
| 537 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCamelCase( UpperCamelCase__ : int ) -> list[int]:
if num <= 0:
A : str = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(UpperCamelCase__ )
A : int = [True] * (num + 1)
A : Dict = []
A : str = 2
A : Any = int(math.sqrt(UpperCamelCase__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(UpperCamelCase__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , UpperCamelCase__ ):
if sieve[i] is True:
A : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(UpperCamelCase__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 537 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowercase_ ( unittest.TestCase ):
def __init__( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple=13 , __lowerCamelCase : Optional[int]=30 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Optional[int]=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : int=10 , __lowerCamelCase : int=0.0_2 , ):
snake_case__ : Union[str, Any] = parent
snake_case__ : str = batch_size
snake_case__ : List[Any] = image_size
snake_case__ : List[str] = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : int = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : Optional[int] = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = type_sequence_label_size
snake_case__ : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = (image_size // patch_size) ** 2
snake_case__ : List[str] = num_patches + 1
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowerCAmelCase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int ):
snake_case__ : Union[str, Any] = FlaxViTModel(config=__lowerCamelCase )
snake_case__ : List[Any] = model(__lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : str = (self.image_size, self.image_size)
snake_case__ : Union[str, Any] = (self.patch_size, self.patch_size)
snake_case__ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
snake_case__ : int = self.type_sequence_label_size
snake_case__ : Any = FlaxViTForImageClassification(config=__lowerCamelCase )
snake_case__ : Union[str, Any] = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : List[Any] = 1
snake_case__ : Dict = FlaxViTForImageClassification(__lowerCamelCase )
snake_case__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Optional[int] = model(__lowerCamelCase )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) ,
) : Union[str, Any] = config_and_inputs
snake_case__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowercase_ ( lowerCAmelCase_ , unittest.TestCase ):
A_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : Tuple = FlaxViTModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__lowerCamelCase )
snake_case__ : List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCAmelCase ( self : Dict ):
snake_case__ , snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
snake_case__ : Optional[Any] = model_class(__lowerCamelCase )
@jax.jit
def model_jitted(__lowerCamelCase : str , **__lowerCamelCase : str ):
return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
with self.subTest('JIT Enabled' ):
snake_case__ : Optional[Any] = model_jitted(**__lowerCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case__ : int = model_jitted(**__lowerCamelCase ).to_tuple()
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCAmelCase ( self : str ):
for model_class_name in self.all_model_classes:
snake_case__ : Optional[Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
snake_case__ : List[str] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__lowerCamelCase )
| 270 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 ) -> int:
snake_case__ : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' )
snake_case__ : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ : Union[str, Any] = datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ : Optional[int] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ : int = 16
elif accelerator.mixed_precision != "no":
snake_case__ : Tuple = 8
else:
snake_case__ : Tuple = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding='longest' , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors='pt' , )
# Instantiate dataloaders.
snake_case__ : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __SCREAMING_SNAKE_CASE ) == "1":
snake_case__ : Optional[int] = 2
# New Code #
snake_case__ : List[str] = int(args.gradient_accumulation_steps )
# Initialize accelerator
snake_case__ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__SCREAMING_SNAKE_CASE )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Optional[Any] = config['lr']
snake_case__ : Dict = int(config['num_epochs'] )
snake_case__ : Any = int(config['seed'] )
snake_case__ : List[str] = int(config['batch_size'] )
snake_case__ : Tuple = evaluate.load('glue' , 'mrpc' )
set_seed(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ : Any = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
snake_case__ : str = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = output.loss
accelerator.backward(__SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ : int = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
snake_case__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , __SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Optional[int]:
snake_case__ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=__SCREAMING_SNAKE_CASE , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
snake_case__ : Any = parser.parse_args()
snake_case__ : Dict = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 270 | 1 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = OmegaConf.load(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
lowerCAmelCase__ :Dict = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ :Union[str, Any] = {}
lowerCAmelCase__ :Optional[Any] = 'first_stage_model.'
for key in keys:
if key.startswith(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ :Dict = {}
lowerCAmelCase__ :List[Any] = 'model.diffusion_model.'
for key in keys:
if key.startswith(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Tuple = state_dict[key]
lowerCAmelCase__ :Tuple = config.model.params.first_stage_config.params
lowerCAmelCase__ :List[str] = config.model.params.unet_config.params
lowerCAmelCase__ :Optional[int] = VQModel(**_SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = UNetLDMModel(**_SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :Any = LDMPipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
__A = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 711 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[str] = """SpeechT5FeatureExtractor"""
__magic_name__ :List[Any] = """SpeechT5Tokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = kwargs.pop('audio' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('text' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('text_target' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('audio_target' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('sampling_rate' , __UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase__ :List[str] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
elif text is not None:
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :Any = None
if audio_target is not None:
lowerCAmelCase__ :int = self.feature_extractor(audio_target=__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :int = targets['input_values']
elif text_target is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Dict = targets['input_ids']
else:
lowerCAmelCase__ :Dict = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :Dict = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Dict = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = kwargs.pop('input_values' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('input_ids' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('labels' , __UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase__ :Union[str, Any] = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
elif input_ids is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :int = None
if labels is not None:
if "input_ids" in labels or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCAmelCase__ :List[str] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = targets['input_ids']
else:
lowerCAmelCase__ :Optional[int] = self.feature_extractor.feature_size
lowerCAmelCase__ :int = self.feature_extractor.num_mel_bins
lowerCAmelCase__ :Dict = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feature_size_hack
lowerCAmelCase__ :str = targets['input_values']
else:
lowerCAmelCase__ :Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Tuple = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
| 560 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowercase : Tuple ={"""UserAgent""": UserAgent().random}
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
lowerCamelCase_ : Union[str, Any] = script.contents[0]
lowerCamelCase_ : Optional[int] = json.loads(data[data.find('{\"config\"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCamelCase_ :
def __init__( self : List[str] , lowerCamelCase : Tuple ):
lowerCamelCase_ : Dict = F"https://www.instagram.com/{username}/"
lowerCamelCase_ : Dict = self.get_json()
def __a ( self : List[Any] ):
lowerCamelCase_ : List[str] = requests.get(self.url , headers=a_ ).text
lowerCamelCase_ : Optional[int] = BeautifulSoup(a_ , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ):
return F"{self.__class__.__name__}(\'{self.username}\')"
def __str__( self : Any ):
return F"{self.fullname} ({self.username}) is {self.biography}"
@property
def __a ( self : int ):
return self.user_data["username"]
@property
def __a ( self : List[str] ):
return self.user_data["full_name"]
@property
def __a ( self : Any ):
return self.user_data["biography"]
@property
def __a ( self : int ):
return self.user_data["business_email"]
@property
def __a ( self : Tuple ):
return self.user_data["external_url"]
@property
def __a ( self : List[Any] ):
return self.user_data["edge_followed_by"]["count"]
@property
def __a ( self : Optional[int] ):
return self.user_data["edge_follow"]["count"]
@property
def __a ( self : Optional[int] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __a ( self : Optional[int] ):
return self.user_data["profile_pic_url_hd"]
@property
def __a ( self : Any ):
return self.user_data["is_verified"]
@property
def __a ( self : Any ):
return self.user_data["is_private"]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ = "github" ):
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCamelCase_ : Optional[Any] = InstagramUser(__UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,__UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : str =InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 364 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = ["""input_features"""]
def __init__( self : int , a_ : Optional[int]=80 , a_ : Any=1_60_00 , a_ : Tuple=1_60 , a_ : Union[str, Any]=30 , a_ : int=4_00 , a_ : List[str]=0.0 , a_ : Dict=False , **a_ : Optional[Any] , ):
super().__init__(
feature_size=a_ , sampling_rate=a_ , padding_value=a_ , return_attention_mask=a_ , **a_ , )
lowerCAmelCase_ : Optional[int] = n_fft
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : str = chunk_length
lowerCAmelCase_ : Optional[int] = chunk_length * sampling_rate
lowerCAmelCase_ : Any = self.n_samples // hop_length
lowerCAmelCase_ : Optional[Any] = sampling_rate
lowerCAmelCase_ : Optional[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a_ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=a_ , norm="slaney" , mel_scale="slaney" , )
def lowerCamelCase ( self : Optional[int] , a_ : np.array ):
lowerCAmelCase_ : List[Any] = spectrogram(
a_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
lowerCAmelCase_ : Tuple = log_spec[:, :-1]
lowerCAmelCase_ : Dict = np.maximum(a_ , log_spec.max() - 8.0 )
lowerCAmelCase_ : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase ( a_ : List[np.ndarray] , a_ : List[np.ndarray] , a_ : float = 0.0 ):
if attention_mask is not None:
lowerCAmelCase_ : Tuple = np.array(a_ , np.intaa )
lowerCAmelCase_ : Dict = []
for vector, length in zip(a_ , attention_mask.sum(-1 ) ):
lowerCAmelCase_ : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase_ : Union[str, Any] = padding_value
normed_input_values.append(a_ )
else:
lowerCAmelCase_ : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a_ : bool = True , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[bool] = None , a_ : Optional[str] = "max_length" , a_ : Optional[int] = None , a_ : Optional[int] = None , a_ : Optional[bool] = None , **a_ : Any , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Tuple = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Tuple = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
lowerCAmelCase_ : Optional[Any] = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : List[str] = [np.asarray([raw_speech] ).T]
lowerCAmelCase_ : List[Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowerCAmelCase_ : Optional[int] = self.pad(
a_ , padding=a_ , max_length=max_length if max_length else self.n_samples , truncation=a_ , pad_to_multiple_of=a_ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase_ : Tuple = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowerCAmelCase_ : str = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowerCAmelCase_ : Dict = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowerCAmelCase_ : List[Any] = [self._np_extract_fbank_features(a_ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a_ ):
lowerCAmelCase_ : Any = [np.asarray(a_ , dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase_ : List[str] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase_ : Union[str, Any] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase_ : List[Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 610 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :List[str] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=snake_case__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=snake_case__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=snake_case__ )
return parser.parse_args()
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Dict = parse_args()
# Import training_script as a module.
__snake_case :List[str] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__snake_case :Optional[Any] = script_fpath.stem
__snake_case :Optional[int] = importlib.import_module(snake_case__ )
# Patch sys.argv
__snake_case :Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 717 |
from __future__ import annotations
import time
import numpy as np
lowerCamelCase__ = [8, 5, 9, 7]
lowerCamelCase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCamelCase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ , a__ , a__ , ) -> None:
'''simple docstring'''
__snake_case :Dict = claim_vector
__snake_case :Optional[int] = allocated_resources_table
__snake_case :Optional[int] = maximum_claim_table
def __lowercase ( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowercase ( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowercase ( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(a__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowercase ( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(a__ ): i for i in self.__need()}
def __lowercase ( self , **a__ ) -> None:
'''simple docstring'''
__snake_case :Optional[int] = self.__need()
__snake_case :List[Any] = self.__allocated_resources_table
__snake_case :str = self.__available_resources()
__snake_case :List[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
__snake_case :Dict = False
for each_need in need_list:
__snake_case :Dict = True
for index, need in enumerate(a__ ):
if need > available_resources[index]:
__snake_case :Dict = False
break
if execution:
__snake_case :Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__snake_case :List[str] = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(a__ )
# update available/freed resources stack
__snake_case :Tuple = np.array(a__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(a__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def __lowercase ( self ) -> Dict:
'''simple docstring'''
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(a__ ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(a__ ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(a__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(a__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __a :
'''simple docstring'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=10 , _lowerCamelCase=3 , _lowerCamelCase=2 , _lowerCamelCase=2 , _lowerCamelCase=2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=0.9 , _lowerCamelCase=None , ) -> List[Any]:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = tubelet_size
__lowercase = num_frames
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = mask_ratio
__lowercase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__lowercase = (image_size // patch_size) ** 2
__lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__lowercase = int(mask_ratio * self.seq_length )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = VideoMAEModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
__lowercase = VideoMAEForPreTraining(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.batch_size , -1 ).bool()
__lowercase = model(_lowerCamelCase , _lowerCamelCase )
# model only returns predictions for masked patches
__lowercase = mask.sum().item()
__lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase : List[str] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase : str = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : List[str] = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Dict = False
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = VideoMAEModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> str:
'''simple docstring'''
__lowercase = copy.deepcopy(_lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowercase = torch.ones((self.model_tester.num_masks,) )
__lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__lowercase = mask.expand(self.model_tester.batch_size , -1 ).bool()
__lowercase = bool_masked_pos.to(_lowerCamelCase )
if return_labels:
if model_class in [
*get_values(_lowerCamelCase ),
]:
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCamelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = VideoMAEModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
if not self.has_attentions:
pass
else:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowercase = len(_lowerCamelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCamelCase ) )
__lowercase = outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( ) ->Optional[int]:
__lowercase = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__lowercase = np.load(__magic_name__ )
return list(__magic_name__ )
@require_torch
@require_vision
class __a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
__lowercase = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_video()
__lowercase = image_processor(_lowerCamelCase , return_tensors="pt" ).to(_lowerCamelCase )
# add boolean mask, indicating which patches to mask
__lowercase = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__lowercase = torch.load(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size([1, 1_408, 1_536] )
__lowercase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_lowerCamelCase )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__lowercase = torch.tensor([0.5142] , device=_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__lowercase = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_lowerCamelCase ).to(
_lowerCamelCase )
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
__lowercase = torch.tensor(torch.tensor([0.6469] ) , device=_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _lowerCamelCase , atol=1e-4 ) )
| 118 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_lowercase = get_logger(__name__)
class __a ( enum.Enum ):
'''simple docstring'''
_lowerCamelCase : Tuple = """all_checks"""
_lowerCamelCase : Optional[int] = """basic_checks"""
_lowerCamelCase : str = """no_checks"""
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__=None ) ->Union[str, Any]:
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(__magic_name__ ) - set(__magic_name__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__magic_name__ ) - set(__magic_name__ ) ) )
if len(set(__magic_name__ ) - set(__magic_name__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__magic_name__ ) - set(__magic_name__ ) ) )
__lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__lowercase = " for " + verification_name if verification_name is not None else ""
if len(__magic_name__ ) > 0:
raise NonMatchingChecksumError(
F'''Checksums didn\'t match{for_verification_name}:\n'''
F'''{bad_urls}\n'''
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
class __a ( __a ):
'''simple docstring'''
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->Any:
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(__magic_name__ ) - set(__magic_name__ ) ) > 0:
raise ExpectedMoreSplits(str(set(__magic_name__ ) - set(__magic_name__ ) ) )
if len(set(__magic_name__ ) - set(__magic_name__ ) ) > 0:
raise UnexpectedSplits(str(set(__magic_name__ ) - set(__magic_name__ ) ) )
__lowercase = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__magic_name__ ) > 0:
raise NonMatchingSplitsSizesError(str(__magic_name__ ) )
logger.info("All the splits matched successfully." )
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ = True ) ->dict:
if record_checksum:
__lowercase = shaaaa()
with open(__magic_name__ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , b"" ):
m.update(__magic_name__ )
__lowercase = m.hexdigest()
else:
__lowercase = None
return {"num_bytes": os.path.getsize(__magic_name__ ), "checksum": checksum}
def lowerCAmelCase__ ( __magic_name__ ) ->List[str]:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 118 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A__ :
lowerCamelCase__ : CommonSchedulerState
# setable values
lowerCamelCase__ : jnp.ndarray
lowerCamelCase__ : jnp.ndarray
lowerCamelCase__ : Optional[int] =None
@classmethod
def lowercase ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return cls(common=lowerCamelCase , init_noise_sigma=lowerCamelCase , timesteps=lowerCamelCase )
@dataclass
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : DDPMSchedulerState
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : Optional[int] =[e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase__ : jnp.dtype
@property
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
return True
@register_to_config
def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = 0.0_0_0_1 , lowerCamelCase = 0.0_2 , lowerCamelCase = "linear" , lowerCamelCase = None , lowerCamelCase = "fixed_small" , lowerCamelCase = True , lowerCamelCase = "epsilon" , lowerCamelCase = jnp.floataa , ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : Optional[Any] = dtype
def lowercase ( self , lowerCamelCase = None ) -> DDPMSchedulerState:
"""simple docstring"""
if common is None:
__magic_name__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__magic_name__ : Dict = jnp.array(1.0 , dtype=self.dtype )
__magic_name__ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCamelCase , init_noise_sigma=lowerCamelCase , timesteps=lowerCamelCase , )
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = () ) -> DDPMSchedulerState:
"""simple docstring"""
__magic_name__ : Dict = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__magic_name__ : Union[str, Any] = (jnp.arange(0 , lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCamelCase , timesteps=lowerCamelCase , )
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Optional[Any] = state.common.alphas_cumprod[t]
__magic_name__ : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__magic_name__ : Optional[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__magic_name__ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__magic_name__ : Dict = jnp.clip(lowerCamelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__magic_name__ : str = jnp.log(jnp.clip(lowerCamelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
__magic_name__ : List[str] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__magic_name__ : Any = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__magic_name__ : Optional[Any] = variance
__magic_name__ : Union[str, Any] = state.common.betas[t]
__magic_name__ : List[Any] = (predicted_variance + 1) / 2
__magic_name__ : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
__magic_name__ : Dict = timestep
if key is None:
__magic_name__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__magic_name__ , __magic_name__ : List[str] = jnp.split(lowerCamelCase , sample.shape[1] , axis=1 )
else:
__magic_name__ : Dict = None
# 1. compute alphas, betas
__magic_name__ : Dict = state.common.alphas_cumprod[t]
__magic_name__ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__magic_name__ : Tuple = 1 - alpha_prod_t
__magic_name__ : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__magic_name__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__magic_name__ : int = model_output
elif self.config.prediction_type == "v_prediction":
__magic_name__ : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__magic_name__ : Tuple = jnp.clip(lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : Dict = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__magic_name__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__magic_name__ : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__magic_name__ : Dict = jax.random.split(lowerCamelCase , num=1 )
__magic_name__ : int = jax.random.normal(lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCamelCase , lowerCamelCase , predicted_variance=lowerCamelCase ) ** 0.5) * noise
__magic_name__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__magic_name__ : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCamelCase , state=lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __len__( self ) -> Optional[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
| 336 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Dict:
"""simple docstring"""
super().__init__(*lowerCamelCase , **lowerCamelCase )
requires_backends(self , '''decord''' )
self.check_model_type(lowerCamelCase )
def lowercase ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None ) -> List[Any]:
"""simple docstring"""
__magic_name__ : List[str] = {}
if frame_sampling_rate is not None:
__magic_name__ : Optional[int] = frame_sampling_rate
if num_frames is not None:
__magic_name__ : Optional[Any] = num_frames
__magic_name__ : Union[str, Any] = {}
if top_k is not None:
__magic_name__ : Union[str, Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
return super().__call__(lowerCamelCase , **lowerCamelCase )
def lowercase ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=1 ) -> int:
"""simple docstring"""
if num_frames is None:
__magic_name__ : Any = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
__magic_name__ : str = BytesIO(requests.get(lowerCamelCase ).content )
__magic_name__ : Optional[int] = VideoReader(lowerCamelCase )
videoreader.seek(0 )
__magic_name__ : Union[str, Any] = 0
__magic_name__ : Tuple = num_frames * frame_sampling_rate - 1
__magic_name__ : Tuple = np.linspace(lowerCamelCase , lowerCamelCase , num=lowerCamelCase , dtype=np.intaa )
__magic_name__ : Union[str, Any] = videoreader.get_batch(lowerCamelCase ).asnumpy()
__magic_name__ : List[str] = list(lowerCamelCase )
__magic_name__ : Tuple = self.image_processor(lowerCamelCase , return_tensors=self.framework )
return model_inputs
def lowercase ( self , lowerCamelCase ) -> str:
"""simple docstring"""
__magic_name__ : Union[str, Any] = self.model(**lowerCamelCase )
return model_outputs
def lowercase ( self , lowerCamelCase , lowerCamelCase=5 ) -> Optional[Any]:
"""simple docstring"""
if top_k > self.model.config.num_labels:
__magic_name__ : Dict = self.model.config.num_labels
if self.framework == "pt":
__magic_name__ : Tuple = model_outputs.logits.softmax(-1 )[0]
__magic_name__ , __magic_name__ : str = probs.topk(lowerCamelCase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__magic_name__ : List[str] = scores.tolist()
__magic_name__ : Tuple = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
| 336 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
a = logging.get_logger(__name__)
# General docstring
a = 'PoolFormerConfig'
# Base docstring
a = 'sail/poolformer_s12'
a = [1, 512, 7, 7]
# Image classification docstring
a = 'sail/poolformer_s12'
a = 'tabby, tabby cat'
a = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def a_ ( __UpperCAmelCase , __UpperCAmelCase = 0.0 , __UpperCAmelCase = False ) -> Tuple:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
snake_case: List[str] =1 - drop_prob
snake_case: Dict =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case: int =keep_prob + torch.rand(__UpperCAmelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case: Tuple =input.div(__UpperCAmelCase ) * random_tensor
return output
class a_ ( nn.Module ):
def __init__( self : Optional[int] , a_ : Optional[float] = None ) -> None:
super().__init__()
snake_case: Optional[int] =drop_prob
def UpperCamelCase ( self : Any , a_ : torch.Tensor ) -> torch.Tensor:
return drop_path(a_ , self.drop_prob , self.training )
def UpperCamelCase ( self : List[Any] ) -> str:
return "p={}".format(self.drop_prob )
class a_ ( nn.Module ):
def __init__( self : int , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : List[str] , a_ : List[str] , a_ : Dict , a_ : Tuple=None ) -> Dict:
super().__init__()
snake_case: Dict =patch_size if isinstance(a_ , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case: Dict =stride if isinstance(a_ , collections.abc.Iterable ) else (stride, stride)
snake_case: Dict =padding if isinstance(a_ , collections.abc.Iterable ) else (padding, padding)
snake_case: Optional[Any] =nn.Convad(a_ , a_ , kernel_size=a_ , stride=a_ , padding=a_ )
snake_case: Dict =norm_layer(a_ ) if norm_layer else nn.Identity()
def UpperCamelCase ( self : Dict , a_ : List[str] ) -> Optional[int]:
snake_case: Union[str, Any] =self.projection(a_ )
snake_case: Dict =self.norm(a_ )
return embeddings
class a_ ( nn.GroupNorm ):
def __init__( self : str , a_ : Optional[Any] , **a_ : str ) -> Dict:
super().__init__(1 , a_ , **a_ )
class a_ ( nn.Module ):
def __init__( self : Optional[Any] , a_ : Union[str, Any] ) -> int:
super().__init__()
snake_case: Tuple =nn.AvgPoolad(a_ , stride=1 , padding=pool_size // 2 , count_include_pad=a_ )
def UpperCamelCase ( self : List[Any] , a_ : Any ) -> Any:
return self.pool(a_ ) - hidden_states
class a_ ( nn.Module ):
def __init__( self : Tuple , a_ : Union[str, Any] , a_ : str , a_ : Optional[Any] , a_ : Optional[Any] ) -> List[str]:
super().__init__()
snake_case: Any =nn.Convad(a_ , a_ , 1 )
snake_case: List[Any] =nn.Convad(a_ , a_ , 1 )
snake_case: Tuple =PoolFormerDropPath(a_ )
if isinstance(config.hidden_act , a_ ):
snake_case: Optional[int] =ACTaFN[config.hidden_act]
else:
snake_case: Any =config.hidden_act
def UpperCamelCase ( self : str , a_ : int ) -> int:
snake_case: int =self.conva(a_ )
snake_case: Tuple =self.act_fn(a_ )
snake_case: List[Any] =self.drop(a_ )
snake_case: Optional[Any] =self.conva(a_ )
snake_case: Tuple =self.drop(a_ )
return hidden_states
class a_ ( nn.Module ):
def __init__( self : Any , a_ : List[Any] , a_ : Any , a_ : Optional[int] , a_ : str , a_ : List[str] , a_ : Union[str, Any] ) -> str:
super().__init__()
snake_case: Dict =PoolFormerPooling(a_ )
snake_case: List[Any] =PoolFormerOutput(a_ , a_ , a_ , a_ )
snake_case: Any =PoolFormerGroupNorm(a_ )
snake_case: List[Any] =PoolFormerGroupNorm(a_ )
# Useful for training neural nets
snake_case: Any =PoolFormerDropPath(a_ ) if drop_path > 0.0 else nn.Identity()
snake_case: Union[str, Any] =config.use_layer_scale
if config.use_layer_scale:
snake_case: List[str] =nn.Parameter(
config.layer_scale_init_value * torch.ones((a_) ) , requires_grad=a_ )
snake_case: List[Any] =nn.Parameter(
config.layer_scale_init_value * torch.ones((a_) ) , requires_grad=a_ )
def UpperCamelCase ( self : Optional[Any] , a_ : Union[str, Any] ) -> List[str]:
if self.use_layer_scale:
snake_case: int =self.pooling(self.before_norm(a_ ) )
snake_case: Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case: Tuple =hidden_states + self.drop_path(a_ )
snake_case: Optional[Any] =()
snake_case: Optional[Any] =self.output(self.after_norm(a_ ) )
snake_case: int =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case: str =hidden_states + self.drop_path(a_ )
snake_case: Dict =(output,) + outputs
return outputs
else:
snake_case: Dict =self.drop_path(self.pooling(self.before_norm(a_ ) ) )
# First residual connection
snake_case: Any =pooling_output + hidden_states
snake_case: Union[str, Any] =()
# Second residual connection inside the PoolFormerOutput block
snake_case: List[str] =self.drop_path(self.output(self.after_norm(a_ ) ) )
snake_case: Tuple =hidden_states + layer_output
snake_case: List[Any] =(output,) + outputs
return outputs
class a_ ( nn.Module ):
def __init__( self : List[Any] , a_ : int ) -> Optional[int]:
super().__init__()
snake_case: int =config
# stochastic depth decay rule
snake_case: int =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case: List[str] =[]
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case: Union[str, Any] =nn.ModuleList(a_ )
# Transformer blocks
snake_case: List[str] =[]
snake_case: str =0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case: Optional[int] =[]
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
a_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(a_ ) )
snake_case: Dict =nn.ModuleList(a_ )
def UpperCamelCase ( self : Any , a_ : Tuple , a_ : str=False , a_ : int=True ) -> Dict:
snake_case: List[Any] =() if output_hidden_states else None
snake_case: Tuple =pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case: str =layers
# Get patch embeddings from hidden_states
snake_case: List[str] =embedding_layer(a_ )
# Send the embeddings through the blocks
for _, blk in enumerate(a_ ):
snake_case: Optional[int] =blk(a_ )
snake_case: int =layer_outputs[0]
if output_hidden_states:
snake_case: str =all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a_ , hidden_states=a_ )
class a_ ( snake_case ):
UpperCAmelCase : int = PoolFormerConfig
UpperCAmelCase : Optional[Any] = """poolformer"""
UpperCAmelCase : List[str] = """pixel_values"""
UpperCAmelCase : List[str] = True
def UpperCamelCase ( self : Optional[Any] , a_ : List[Any] ) -> int:
if isinstance(a_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a_ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCamelCase ( self : Optional[int] , a_ : Optional[Any] , a_ : Optional[Any]=False ) -> str:
if isinstance(a_ , a_ ):
snake_case: Any =value
a = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
a = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , snake_case , )
class a_ ( snake_case ):
def __init__( self : List[Any] , a_ : Dict ) -> Union[str, Any]:
super().__init__(a_ )
snake_case: List[str] =config
snake_case: Optional[int] =PoolFormerEncoder(a_ )
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self : Optional[int] ) -> List[Any]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self : List[Any] , a_ : Optional[torch.FloatTensor] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
snake_case: Optional[Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case: int =return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
snake_case: List[str] =self.encoder(
a_ , output_hidden_states=a_ , return_dict=a_ , )
snake_case: Tuple =encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=a_ , hidden_states=encoder_outputs.hidden_states , )
class a_ ( nn.Module ):
def __init__( self : Optional[Any] , a_ : str ) -> Optional[int]:
super().__init__()
snake_case: str =nn.Linear(config.hidden_size , config.hidden_size )
def UpperCamelCase ( self : Dict , a_ : Tuple ) -> int:
snake_case: Tuple =self.dense(a_ )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , snake_case , )
class a_ ( snake_case ):
def __init__( self : int , a_ : int ) -> Optional[int]:
super().__init__(a_ )
snake_case: List[str] =config.num_labels
snake_case: List[Any] =PoolFormerModel(a_ )
# Final norm
snake_case: Optional[Any] =PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case: List[Any] =(
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self : Optional[Any] , a_ : Optional[torch.FloatTensor] = None , a_ : Optional[torch.LongTensor] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
snake_case: Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
snake_case: int =self.poolformer(
a_ , output_hidden_states=a_ , return_dict=a_ , )
snake_case: Optional[Any] =outputs[0]
snake_case: Any =self.classifier(self.norm(a_ ).mean([-2, -1] ) )
snake_case: int =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case: Any ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case: Dict ='single_label_classification'
else:
snake_case: Dict ='multi_label_classification'
if self.config.problem_type == "regression":
snake_case: Dict =MSELoss()
if self.num_labels == 1:
snake_case: Any =loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case: Dict =loss_fct(a_ , a_ )
elif self.config.problem_type == "single_label_classification":
snake_case: str =CrossEntropyLoss()
snake_case: Dict =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case: int =BCEWithLogitsLoss()
snake_case: Union[str, Any] =loss_fct(a_ , a_ )
if not return_dict:
snake_case: Optional[int] =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
| 350 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a = random.Random()
def a_ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> List[str]:
"""simple docstring"""
if rng is None:
snake_case: Dict =global_rng
snake_case: Tuple =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a_ ( unittest.TestCase ):
def __init__( self : int , a_ : Any , a_ : str=7 , a_ : Tuple=4_0_0 , a_ : List[Any]=2_0_0_0 , a_ : str=2_0_4_8 , a_ : List[str]=1_2_8 , a_ : int=1 , a_ : Tuple=5_1_2 , a_ : Dict=3_0 , a_ : Optional[int]=4_4_1_0_0 , ) -> Union[str, Any]:
snake_case: Union[str, Any] =parent
snake_case: Optional[Any] =batch_size
snake_case: Union[str, Any] =min_seq_length
snake_case: List[str] =max_seq_length
snake_case: str =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case: int =spectrogram_length
snake_case: List[str] =feature_size
snake_case: Dict =num_audio_channels
snake_case: int =hop_length
snake_case: List[Any] =chunk_length
snake_case: Optional[Any] =sampling_rate
def UpperCamelCase ( self : Any ) -> Dict:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase ( self : Tuple , a_ : Optional[Any]=False , a_ : Tuple=False ) -> Optional[int]:
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
snake_case: Union[str, Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case: Union[str, Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case: Any =[np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : Optional[Any] = TvltFeatureExtractor
def UpperCamelCase ( self : int ) -> int:
snake_case: Union[str, Any] =TvltFeatureExtractionTester(self )
def UpperCamelCase ( self : Optional[Any] ) -> str:
snake_case: int =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(a_ , 'feature_size' ) )
self.assertTrue(hasattr(a_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(a_ , 'hop_length' ) )
self.assertTrue(hasattr(a_ , 'chunk_length' ) )
self.assertTrue(hasattr(a_ , 'sampling_rate' ) )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: Optional[Any] =feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
snake_case: str =self.feature_extraction_class.from_pretrained(a_ )
snake_case: Optional[int] =feat_extract_first.to_dict()
snake_case: List[str] =feat_extract_second.to_dict()
snake_case: Optional[int] =dict_first.pop('mel_filters' )
snake_case: List[Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : str ) -> str:
snake_case: Any =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: int =os.path.join(a_ , 'feat_extract.json' )
feat_extract_first.to_json_file(a_ )
snake_case: Union[str, Any] =self.feature_extraction_class.from_json_file(a_ )
snake_case: Optional[Any] =feat_extract_first.to_dict()
snake_case: Optional[Any] =feat_extract_second.to_dict()
snake_case: Union[str, Any] =dict_first.pop('mel_filters' )
snake_case: Union[str, Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize feature_extractor
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case: Any =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case: Optional[Any] =[np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
snake_case: List[Any] =feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case: Any =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case: Optional[Any] =feature_extractor(
a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 , mask_audio=a_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case: List[Any] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case: List[str] =np.asarray(a_ )
snake_case: str =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase ( self : Any , a_ : str ) -> Union[str, Any]:
snake_case: Dict =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case: Optional[int] =ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
snake_case: Tuple =self._load_datasamples(1 )
snake_case: Any =TvltFeatureExtractor()
snake_case: Any =feature_extractor(a_ , return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
snake_case: int =torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1E-4 ) )
| 350 | 1 |
"""simple docstring"""
def lowercase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(__UpperCamelCase ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(__UpperCamelCase ) == 1:
return True
__magic_name__ = series[1] - series[0]
for index in range(len(__UpperCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def lowercase ( __UpperCamelCase ) -> float:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(__UpperCamelCase ) == 0:
raise ValueError('''Input list must be a non empty list''' )
__magic_name__ = 0
for val in series:
answer += val
return answer / len(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def lowercase ( __UpperCamelCase ) -> Any:
return choice(__UpperCamelCase )
def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> int:
__magic_name__ = random_pivot(__UpperCamelCase )
# partition based on pivot
# linear time
__magic_name__ = [e for e in lst if e < pivot]
__magic_name__ = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__UpperCamelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__UpperCamelCase ) < k - 1:
return kth_number(__UpperCamelCase , k - len(__UpperCamelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE_=[2, 3, 4] , SCREAMING_SNAKE_CASE_=None , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_stages
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = ConvNextModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = ConvNextForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ConvNextModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ConvNextModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> List[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (ConvNextBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = ConvNextConfig
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = ConvNextModelTester(self )
| 42 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'sentencepiece.model'}
UpperCamelCase = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
UpperCamelCase = {
'google/rembert': 256,
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Any="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Dict="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[CLS]" , SCREAMING_SNAKE_CASE__ : List[Any]="[MASK]" , **SCREAMING_SNAKE_CASE__ : str , ) -> Dict:
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def a ( self : int ) -> Union[str, Any]:
return len(self.sp_model )
def a ( self : Any ) -> str:
lowerCAmelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = d
lowerCAmelCase__ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=False ) -> Optional[int]:
lowerCAmelCase__ = self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE__ )
return pieces
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int:
lowerCAmelCase__ = self.sp_model.decode_pieces(SCREAMING_SNAKE_CASE__ )
return out_string
def a ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("Vocabulary path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 61 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 547 |
from __future__ import annotations
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """MIT"""
lowerCamelCase__ = """1.0.0"""
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """[email protected]"""
lowerCamelCase__ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Any , __lowercase : str ):
'''simple docstring'''
super().__init__()
__a = []
__a = domain
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : str , __lowercase : list[tuple[str, str | None]] ):
'''simple docstring'''
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__a = parse.urljoin(self.domain , __lowercase )
self.urls.append(__lowercase )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return ".".join(get_sub_domain_name(_SCREAMING_SNAKE_CASE ).split(""".""" )[-2:] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return parse.urlparse(_SCREAMING_SNAKE_CASE ).netloc
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "https://github.com" ):
"""simple docstring"""
__a = get_domain_name(_SCREAMING_SNAKE_CASE )
# Initialize the parser
__a = Parser(_SCREAMING_SNAKE_CASE )
try:
# Open URL
__a = requests.get(_SCREAMING_SNAKE_CASE )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__a = requests.get(_SCREAMING_SNAKE_CASE )
# Get the valid email.
__a = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_SCREAMING_SNAKE_CASE )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 547 | 1 |
from __future__ import annotations
_lowerCAmelCase = """#"""
class _UpperCAmelCase :
def __init__( self ):
A_ : dict = {}
def _lowerCamelCase ( self , a__ ):
A_ : List[Any] = self._trie
for char in text:
if char not in trie:
A_ : Tuple = {}
A_ : Optional[int] = trie[char]
A_ : List[Any] = True
def _lowerCamelCase ( self , a__ ):
A_ : int = self._trie
for char in prefix:
if char in trie:
A_ : str = trie[char]
else:
return []
return self._elements(a__ )
def _lowerCamelCase ( self , a__ ):
A_ : List[str] = []
for c, v in d.items():
A_ : str = [""" """] if c == END else [(c + s) for s in self._elements(a__ )]
result.extend(a__ )
return tuple(a__ )
_lowerCAmelCase = Trie()
_lowerCAmelCase = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : int = trie.find_word(_lowerCAmelCase )
return tuple(string + word for word in suffixes )
def _lowerCAmelCase ( ):
'''simple docstring'''
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 569 |
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCamelCase ):
a = '''bert-generation'''
def __init__( self , a__=50358 , a__=1024 , a__=24 , a__=16 , a__=4096 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.02 , a__=1E-12 , a__=0 , a__=2 , a__=1 , a__="absolute" , a__=True , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
A_ : List[str] = vocab_size
A_ : int = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Optional[int] = intermediate_size
A_ : List[Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : Optional[Any] = initializer_range
A_ : str = layer_norm_eps
A_ : str = position_embedding_type
A_ : List[Any] = use_cache
| 569 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCamelCase = TypeVar('''T''')
__lowerCamelCase = Union[List[T], Tuple[T, ...]]
__lowerCamelCase = Union[T, List[T], Dict[str, T]]
__lowerCamelCase = Union[str, bytes, os.PathLike]
| 708 |
import operator
def _snake_case ( __snake_case , __snake_case = False , __snake_case = None ) -> list:
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = operator.lt if reverse else operator.gt
UpperCAmelCase_ : int = solution or []
if not arr:
return solution
UpperCAmelCase_ : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(__snake_case ):
if _operator(__snake_case , sublist[-1] ):
sublist.append(__snake_case )
arr.pop(__snake_case )
# merging sublist into solution list
if not solution:
solution.extend(__snake_case )
else:
while sublist:
UpperCAmelCase_ : Optional[int] = sublist.pop(0 )
for i, xx in enumerate(__snake_case ):
if not _operator(__snake_case , __snake_case ):
solution.insert(__snake_case , __snake_case )
break
else:
solution.append(__snake_case )
strand_sort(__snake_case , __snake_case , __snake_case )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 455 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__lowercase : List[str] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def lowerCamelCase_ ( _lowerCamelCase : str ):
re.sub('''<n>''' , '''''' , _lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCamelCase ) ) | 142 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__lowercase : Dict = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
__lowercase : Tuple = {
"""camembert-base""": 5_1_2,
}
__lowercase : List[str] = """▁"""
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Tuple = VOCAB_FILES_NAMES
__lowercase :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase :Any = ["input_ids", "attention_mask"]
__lowercase :Union[str, Any] = CamembertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
lowerCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,) | 142 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __a :
__a : Optional[int] = XGLMConfig
__a : Dict = {}
__a : Dict = "gelu"
def __init__( self : str , __magic_name__ : Optional[Any] , __magic_name__ : Dict=14 , __magic_name__ : str=7 , __magic_name__ : Tuple=True , __magic_name__ : int=True , __magic_name__ : int=True , __magic_name__ : Optional[int]=99 , __magic_name__ : Dict=32 , __magic_name__ : List[Any]=2 , __magic_name__ : Dict=4 , __magic_name__ : Optional[int]=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : Dict=0.0_2 , ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Any = seq_length
UpperCAmelCase_ : List[str] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Dict = d_model
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Tuple = ffn_dim
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = activation_dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = 2
UpperCAmelCase_ : List[Any] = 1
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Dict = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : int = self.get_config()
UpperCAmelCase_ : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__magic_name__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__magic_name__ , )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Dict = config_and_inputs
UpperCAmelCase_ : str = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a : Dict = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__a : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__a : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__a : Any = False
__a : Dict = False
__a : Union[str, Any] = False
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = TFXGLMModelTester(self )
UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , n_embd=37 )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = TFXGLMModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __a (unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Any]=True ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCAmelCase_ : Any = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
UpperCAmelCase_ : Union[str, Any] = model.generate(__magic_name__ , do_sample=__magic_name__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __magic_name__ )
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase_ : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCAmelCase_ : Optional[int] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCAmelCase_ : str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
UpperCAmelCase_ : List[str] = model.generate(__magic_name__ , do_sample=__magic_name__ , seed=[7, 0] )
UpperCAmelCase_ : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Dict = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(__magic_name__ , __magic_name__ )
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase_ : Union[str, Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase_ : Any = '''left'''
# use different length sentences to test batching
UpperCAmelCase_ : int = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCAmelCase_ : Tuple = tokenizer(__magic_name__ , return_tensors='''tf''' , padding=__magic_name__ )
UpperCAmelCase_ : Union[str, Any] = inputs['''input_ids''']
UpperCAmelCase_ : List[str] = model.generate(input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCAmelCase_ : Any = model.generate(input_ids=__magic_name__ , max_new_tokens=12 )
UpperCAmelCase_ : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCAmelCase_ : Dict = model.generate(input_ids=__magic_name__ , max_new_tokens=12 )
UpperCAmelCase_ : Any = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
UpperCAmelCase_ : Tuple = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )
| 644 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __a (lowerCamelCase ):
__a : List[Any] = "openai/whisper-base"
__a : Optional[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__a : Any = "transcriber"
__a : str = WhisperProcessor
__a : List[Any] = WhisperForConditionalGeneration
__a : int = ["audio"]
__a : Optional[Any] = ["text"]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return self.model.generate(inputs=__magic_name__ )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
| 644 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( __magic_name__ : str = "https://www.worldometers.info/coronavirus" ) -> dict:
lowercase : List[Any] =BeautifulSoup(requests.get(__magic_name__ ).text , '''html.parser''' )
lowercase : Tuple =soup.findAll('''h1''' )
lowercase : int =soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(__magic_name__ , __magic_name__ )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 92 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'esm'
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : int=1026 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , mask_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Any =vocab_size
lowercase : List[Any] =hidden_size
lowercase : Any =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : int =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : Union[str, Any] =initializer_range
lowercase : Tuple =layer_norm_eps
lowercase : Union[str, Any] =position_embedding_type
lowercase : List[Any] =use_cache
lowercase : Dict =emb_layer_norm_before
lowercase : Optional[Any] =token_dropout
lowercase : Union[str, Any] =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
lowercase : Any =EsmFoldConfig()
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase : Optional[int] =EsmFoldConfig(**UpperCAmelCase__ )
lowercase : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
lowercase : int =get_default_vocab_list()
else:
lowercase : Tuple =vocab_list
else:
lowercase : Union[str, Any] =None
lowercase : Dict =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Union[str, Any] =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase__ ):
lowercase : Optional[Any] =self.esmfold_config.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = None
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase : str =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase__ ):
lowercase : int =TrunkConfig(**self.trunk )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : str =asdict(self )
lowercase : Union[str, Any] =self.trunk.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 48
lowerCamelCase_ = 10_24
lowerCamelCase_ = 1_28
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 32
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = False
lowerCamelCase_ = 4
lowerCamelCase_ = 1_28
lowerCamelCase_ = None
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.structure_module is None:
lowercase : Any =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase__ ):
lowercase : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
lowercase : str =self.sequence_state_dim // self.sequence_head_width
lowercase : int =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowercase : List[Any] =asdict(self )
lowercase : Any =self.structure_module.to_dict()
return output
@dataclass
class __SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 3_84
lowerCamelCase_ = 1_28
lowerCamelCase_ = 16
lowerCamelCase_ = 1_28
lowerCamelCase_ = 12
lowerCamelCase_ = 4
lowerCamelCase_ = 8
lowerCamelCase_ = 0.1
lowerCamelCase_ = 8
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 7
lowerCamelCase_ = 10
lowerCamelCase_ = 1E-8
lowerCamelCase_ = 1E5
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return asdict(self )
def _lowerCAmelCase ( ) -> Optional[int]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 92 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 215 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def lowerCAmelCase ( ):
"""simple docstring"""
__A = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = '''imagenet-1k-id2label.json'''
__A = 1_0_0_0
__A = '''huggingface/label-files'''
__A = num_labels
__A = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__A = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
__A = __A = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__A = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__A = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
__A = [2, 2, 2_0]
__A = [3, 1_2, 1_6]
__A = [1_9_2, 7_6_8, 1_0_2_4]
__A = CvtForImageClassification(__UpperCamelCase )
__A = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__A = image_size
__A = torch.load(__UpperCamelCase , map_location=torch.device('''cpu''' ) )
__A = OrderedDict()
__A = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__A = list_of_state_dict + cls_token(__UpperCamelCase )
__A = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
__A = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
__A = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
__A = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 215 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileBertTokenizer
lowerCAmelCase_ = MobileBertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
lowerCAmelCase_ = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE : Dict = {}
for i, token in enumerate(_A ):
__SCREAMING_SNAKE_CASE : List[str] = i
__SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
__SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 74 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=_snake_case ):
UpperCAmelCase = ["speech"]
def __init__( self : List[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ):
requires_backends(self , ['''speech'''] )
class UpperCAmelCase ( metaclass=_snake_case ):
UpperCAmelCase = ["speech"]
def __init__( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : List[str] ):
requires_backends(self , ['''speech'''] )
| 467 | 0 |
from __future__ import annotations
def UpperCamelCase__ ( A__ ) -> List[Any]:
snake_case__ : List[Any] = 0.0_0
snake_case__ : str = 0
for resistor in resistors:
if resistor <= 0:
snake_case__ : Union[str, Any] = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(UpperCamelCase__ )
first_sum += 1 / float(UpperCamelCase__ )
index += 1
return 1 / first_sum
def UpperCamelCase__ ( A__ ) -> Dict:
snake_case__ : Dict = 0.0_0
snake_case__ : str = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
snake_case__ : Dict = F"""Resistor at index {index} has a negative value!"""
raise ValueError(UpperCamelCase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 699 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : int ) -> int:
'''simple docstring'''
snake_case__ : List[Any] = b.T
snake_case__ : Union[str, Any] = np.sum(np.square(__magic_name__ ) , axis=1 )
snake_case__ : List[Any] = np.sum(np.square(__magic_name__ ) , axis=0 )
snake_case__ : Dict = np.matmul(__magic_name__ , __magic_name__ )
snake_case__ : List[str] = aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = x.reshape(-1 , 3 )
snake_case__ : Optional[int] = squared_euclidean_distance(__magic_name__ , __magic_name__ )
return np.argmin(__magic_name__ , axis=1 )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = ['''pixel_values''']
def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case__ : Optional[Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = np.array(__SCREAMING_SNAKE_CASE ) if clusters is not None else None
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : int = resample
snake_case__ : List[Any] = do_normalize
snake_case__ : Any = do_color_quantize
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Any = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}" )
return resize(
__SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : Any = rescale(image=__SCREAMING_SNAKE_CASE , scale=1 / 127.5 , data_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = image - 1
return image
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : str = do_resize if do_resize is not None else self.do_resize
snake_case__ : List[str] = size if size is not None else self.size
snake_case__ : int = get_size_dict(__SCREAMING_SNAKE_CASE )
snake_case__ : str = resample if resample is not None else self.resample
snake_case__ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ : Any = clusters if clusters is not None else self.clusters
snake_case__ : List[Any] = np.array(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
snake_case__ : Union[str, Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
snake_case__ : Union[str, Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
snake_case__ : List[Any] = [self.normalize(image=__SCREAMING_SNAKE_CASE ) for image in images]
if do_color_quantize:
snake_case__ : int = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ : Optional[Any] = np.array(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = color_quantize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ : List[Any] = images.shape[0]
snake_case__ : List[str] = images.reshape(__SCREAMING_SNAKE_CASE , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ : Optional[int] = list(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
snake_case__ : Optional[int] = {"""input_ids""": images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 38 | def lowerCAmelCase__ ( a__ , a__ , a__ , a__ , a__ ) ->int:
'''simple docstring'''
if index == number_of_items:
return 0
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = knapsack(a__ , a__ , a__ , a__ , index + 1 )
if weights[index] <= max_weight:
_UpperCamelCase = values[index] + knapsack(
a__ , a__ , a__ , max_weight - weights[index] , index + 1 )
return max(a__ , a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 547 | 0 |
'''simple docstring'''
from __future__ import annotations
def __a ( lowerCAmelCase__ : list[int | str] ):
create_state_space_tree(lowerCAmelCase__ , [] , 0 , [0 for i in range(len(lowerCAmelCase__ ) )] )
def __a ( lowerCAmelCase__ : list[int | str] , lowerCAmelCase__ : list[int | str] , lowerCAmelCase__ : int , lowerCAmelCase__ : list[int] , ):
if index == len(lowerCAmelCase__ ):
print(lowerCAmelCase__ )
return
for i in range(len(lowerCAmelCase__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
a__ : Union[str, Any] = True
create_state_space_tree(lowerCAmelCase__ , lowerCAmelCase__ , index + 1 , lowerCAmelCase__ )
current_sequence.pop()
a__ : Tuple = False
__SCREAMING_SNAKE_CASE = [3, 1, 2, 4]
generate_all_permutations(sequence)
__SCREAMING_SNAKE_CASE = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 340 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = ["pixel_values"]
def __init__( self : str , A__ : bool = True , A__ : int = 3_2 , A__ : List[str]=PILImageResampling.BILINEAR , A__ : bool = True , **A__ : Tuple , ) -> None:
'''simple docstring'''
a__ : Optional[int] = do_resize
a__ : List[str] = do_rescale
a__ : Optional[int] = size_divisor
a__ : Any = resample
super().__init__(**A__ )
def __lowerCAmelCase ( self : int , A__ : np.ndarray , A__ : int , A__ : Optional[int] , A__ : Optional[ChannelDimension] = None , **A__ : List[Any] ) -> np.ndarray:
'''simple docstring'''
a__ , a__ : Optional[int] = get_image_size(A__ )
# Rounds the height and width down to the closest multiple of size_divisor
a__ : List[Any] = height // size_divisor * size_divisor
a__ : List[str] = width // size_divisor * size_divisor
a__ : int = resize(A__ , (new_h, new_w) , resample=A__ , data_format=A__ , **A__ )
return image
def __lowerCAmelCase ( self : int , A__ : np.ndarray , A__ : float , A__ : Optional[ChannelDimension] = None , **A__ : Optional[int] ) -> np.ndarray:
'''simple docstring'''
return rescale(image=A__ , scale=A__ , data_format=A__ , **A__ )
def __lowerCAmelCase ( self : List[str] , A__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , A__ : Optional[bool] = None , A__ : Optional[int] = None , A__ : int=None , A__ : Optional[bool] = None , A__ : Optional[Union[TensorType, str]] = None , A__ : ChannelDimension = ChannelDimension.FIRST , **A__ : str , ) -> BatchFeature:
'''simple docstring'''
a__ : Tuple = do_resize if do_resize is not None else self.do_resize
a__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
a__ : Tuple = size_divisor if size_divisor is not None else self.size_divisor
a__ : int = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
a__ : int = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
a__ : Any = [to_numpy_array(A__ ) for img in images]
if do_resize:
a__ : Optional[Any] = [self.resize(A__ , size_divisor=A__ , resample=A__ ) for image in images]
if do_rescale:
a__ : str = [self.rescale(A__ , scale=1 / 2_5_5 ) for image in images]
a__ : Dict = [to_channel_dimension_format(A__ , A__ ) for image in images]
a__ : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=A__ , tensor_type=A__ )
| 340 | 1 |
"""simple docstring"""
import argparse
import copy
def _snake_case ( __snake_case : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = {}
with open(__snake_case ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCamelCase : Optional[Any] = []
_list.append([line.split()[1], line.split()[2]] )
_lowerCamelCase : int = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCamelCase : str = []
_list.append([line.split()[0], line.split()[2]] )
_lowerCamelCase : Any = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _snake_case ( __snake_case : List[Any] , __snake_case : int ):
"""simple docstring"""
with open(__snake_case ) as f:
_lowerCamelCase : str = f.read(1 )
_lowerCamelCase : Dict = start_node
_lowerCamelCase : Tuple = []
_lowerCamelCase : int = start_node
_lowerCamelCase : Tuple = 0
while visiting not in first_solution:
_lowerCamelCase : int = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__snake_case ) and k[0] not in first_solution:
_lowerCamelCase : List[Any] = k[1]
_lowerCamelCase : Any = k[0]
first_solution.append(__snake_case )
_lowerCamelCase : Optional[int] = distance_of_first_solution + int(__snake_case )
_lowerCamelCase : Any = best_node
first_solution.append(__snake_case )
_lowerCamelCase : List[Any] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCamelCase : str = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def _snake_case ( __snake_case : List[Any] , __snake_case : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = []
for n in solution[1:-1]:
_lowerCamelCase : Tuple = solution.index(__snake_case )
for kn in solution[1:-1]:
_lowerCamelCase : Union[str, Any] = solution.index(__snake_case )
if n == kn:
continue
_lowerCamelCase : Optional[int] = copy.deepcopy(__snake_case )
_lowerCamelCase : Any = kn
_lowerCamelCase : str = n
_lowerCamelCase : List[str] = 0
for k in _tmp[:-1]:
_lowerCamelCase : List[Any] = _tmp[_tmp.index(__snake_case ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCamelCase : str = distance + int(i[1] )
_tmp.append(__snake_case )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCamelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __snake_case : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _snake_case ( __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = 1
_lowerCamelCase : str = first_solution
_lowerCamelCase : int = []
_lowerCamelCase : str = distance_of_first_solution
_lowerCamelCase : int = solution
while count <= iters:
_lowerCamelCase : Optional[Any] = find_neighborhood(__snake_case , __snake_case )
_lowerCamelCase : Tuple = 0
_lowerCamelCase : List[Any] = neighborhood[index_of_best_solution]
_lowerCamelCase : List[str] = len(__snake_case ) - 1
_lowerCamelCase : Optional[int] = False
while not found:
_lowerCamelCase : str = 0
while i < len(__snake_case ):
if best_solution[i] != solution[i]:
_lowerCamelCase : Optional[Any] = best_solution[i]
_lowerCamelCase : List[str] = solution[i]
break
_lowerCamelCase : int = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Any = best_solution[:-1]
_lowerCamelCase : Dict = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCamelCase : Tuple = cost
_lowerCamelCase : Optional[Any] = solution
else:
_lowerCamelCase : Dict = index_of_best_solution + 1
_lowerCamelCase : Union[str, Any] = neighborhood[index_of_best_solution]
if len(__snake_case ) >= size:
tabu_list.pop(0 )
_lowerCamelCase : Any = count + 1
return best_solution_ever, best_cost
def _snake_case ( __snake_case : Any=None ):
"""simple docstring"""
_lowerCamelCase : Dict = generate_neighbours(args.File )
_lowerCamelCase , _lowerCamelCase : Any = generate_first_solution(
args.File , __snake_case )
_lowerCamelCase , _lowerCamelCase : List[str] = tabu_search(
__snake_case , __snake_case , __snake_case , args.Iterations , args.Size , )
print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 88 |
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return x if y == 0 else greatest_common_divisor(_UpperCamelCase , x % y )
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return (x * y) // greatest_common_divisor(_UpperCamelCase , _UpperCamelCase )
def __snake_case ( _UpperCamelCase = 20 ) -> int:
_a = 1
for i in range(1 , n + 1 ):
_a = lcm(_UpperCamelCase , _UpperCamelCase )
return g
if __name__ == "__main__":
print(F'''{solution() = }''')
| 487 | 0 |
'''simple docstring'''
import requests
_a : Optional[Any] = "YOUR API KEY"
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = giphy_api_key ) -> list:
"""simple docstring"""
__UpperCAmelCase : Tuple = "+".join(query.split() )
__UpperCAmelCase : Optional[Any] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
__UpperCAmelCase : Union[str, Any] = requests.get(lowerCamelCase__ ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Tuple = "▁"
_a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
_a : Tuple = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_a : Optional[Any] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
| 10 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
lowerCAmelCase__ :List[str] = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
lowerCAmelCase__ :Dict = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
lowerCAmelCase__ :Dict = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 618 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ :List[str] = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase )
class __a :
_a : str
_a : str
_a : Optional[str] = None
_a : Optional[str] = None
_a : Optional[str] = None
@dataclass(frozen=UpperCAmelCase )
class __a :
_a : List[int]
_a : Optional[List[int]] = None
_a : Optional[List[int]] = None
_a : Optional[Union[int, float]] = None
_a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( UpperCAmelCase ):
_a : List[InputFeatures]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE = False , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = hans_processors[task]()
_UpperCAmelCase = os.path.join(
_SCREAMING_SNAKE_CASE , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , ) , )
_UpperCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase = label_list[2], label_list[1]
_UpperCAmelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase = cached_features_file + '.lock'
with FileLock(_SCREAMING_SNAKE_CASE ):
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
_UpperCAmelCase = (
processor.get_dev_examples(_SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(_SCREAMING_SNAKE_CASE )
)
logger.info('Training examples: %s' , len(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = hans_convert_examples_to_features(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info('Saving features into cached file %s' , _SCREAMING_SNAKE_CASE )
torch.save(self.features , _SCREAMING_SNAKE_CASE )
def __len__( self ) -> List[str]:
"""simple docstring"""
return len(self.features )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class __a :
_a : List[InputFeatures]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 128 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE = False , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = hans_processors[task]()
_UpperCAmelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase = label_list[2], label_list[1]
_UpperCAmelCase = label_list
_UpperCAmelCase = processor.get_dev_examples(_SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = hans_convert_examples_to_features(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_SCREAMING_SNAKE_CASE )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_UpperCAmelCase = tf.data.Dataset.from_generator(
_SCREAMING_SNAKE_CASE , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
return self.dataset
def __len__( self ) -> List[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return self.label_list
class __a ( UpperCAmelCase ):
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_SCREAMING_SNAKE_CASE , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_SCREAMING_SNAKE_CASE , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = []
for i, line in enumerate(_SCREAMING_SNAKE_CASE ):
if i == 0:
continue
_UpperCAmelCase = '%s-%s' % (set_type, line[0])
_UpperCAmelCase = line[5]
_UpperCAmelCase = line[6]
_UpperCAmelCase = line[7][2:] if line[7].startswith('ex' ) else line[7]
_UpperCAmelCase = line[0]
examples.append(InputExample(guid=_SCREAMING_SNAKE_CASE , text_a=_SCREAMING_SNAKE_CASE , text_b=_SCREAMING_SNAKE_CASE , label=_SCREAMING_SNAKE_CASE , pairID=_SCREAMING_SNAKE_CASE ) )
return examples
def lowerCAmelCase__ ( a__: List[InputExample] , a__: List[str] , a__: int , a__: PreTrainedTokenizer , ) -> str:
'''simple docstring'''
_UpperCAmelCase = {label: i for i, label in enumerate(a__ )}
_UpperCAmelCase = []
for ex_index, example in tqdm.tqdm(enumerate(a__ ) , desc='convert examples to features' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d' % (ex_index) )
_UpperCAmelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=a__ , max_length=a__ , padding='max_length' , truncation=a__ , return_overflowing_tokens=a__ , )
_UpperCAmelCase = label_map[example.label] if example.label in label_map else 0
_UpperCAmelCase = int(example.pairID )
features.append(InputFeatures(**a__ , label=a__ , pairID=a__ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
lowerCAmelCase__ :Optional[Any] = {
'''hans''': 3,
}
lowerCAmelCase__ :Optional[int] = {
'''hans''': HansProcessor,
}
| 618 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
UpperCamelCase = datasets.utils.logging.get_logger(__name__)
UpperCamelCase = ["names", "prefix"]
UpperCamelCase = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
UpperCamelCase = ["encoding_errors", "on_bad_lines"]
UpperCamelCase = ["date_format"]
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
_snake_case : str = ","
_snake_case : Optional[str] = None
_snake_case : Optional[Union[int, List[int], str]] = "infer"
_snake_case : Optional[List[str]] = None
_snake_case : Optional[List[str]] = None
_snake_case : Optional[Union[int, str, List[int], List[str]]] = None
_snake_case : Optional[Union[List[int], List[str]]] = None
_snake_case : Optional[str] = None
_snake_case : bool = True
_snake_case : Optional[Literal["c", "python", "pyarrow"]] = None
_snake_case : Dict[Union[int, str], Callable[[Any], Any]] = None
_snake_case : Optional[list] = None
_snake_case : Optional[list] = None
_snake_case : bool = False
_snake_case : Optional[Union[int, List[int]]] = None
_snake_case : Optional[int] = None
_snake_case : Optional[Union[str, List[str]]] = None
_snake_case : bool = True
_snake_case : bool = True
_snake_case : bool = False
_snake_case : bool = True
_snake_case : Optional[str] = None
_snake_case : str = "."
_snake_case : Optional[str] = None
_snake_case : str = '"'
_snake_case : int = 0
_snake_case : Optional[str] = None
_snake_case : Optional[str] = None
_snake_case : Optional[str] = None
_snake_case : Optional[str] = None
_snake_case : bool = True
_snake_case : bool = True
_snake_case : int = 0
_snake_case : bool = True
_snake_case : bool = False
_snake_case : Optional[str] = None
_snake_case : int = 10_000
_snake_case : Optional[datasets.Features] = None
_snake_case : Optional[str] = "strict"
_snake_case : Literal["error", "warn", "skip"] = "error"
_snake_case : Optional[str] = None
def __a ( self :Optional[Any] ):
if self.delimiter is not None:
UpperCamelCase__ :List[str] = self.delimiter
if self.column_names is not None:
UpperCamelCase__ :int = self.column_names
@property
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :List[str] = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCamelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_snake_case : Optional[Any] = CsvConfig
def __a ( self :int ):
return datasets.DatasetInfo(features=self.config.features )
def __a ( self :int , lowerCamelCase__ :Dict ):
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
UpperCamelCase__ :int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase__ , (str, list, tuple) ):
UpperCamelCase__ :int = data_files
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :int = [files]
UpperCamelCase__ :Dict = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
UpperCamelCase__ :List[Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :Dict = [files]
UpperCamelCase__ :str = [dl_manager.iter_files(lowerCamelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase__ , gen_kwargs={"""files""": files} ) )
return splits
def __a ( self :List[Any] , lowerCamelCase__ :pa.Table ):
if self.config.features is not None:
UpperCamelCase__ :Union[str, Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCamelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
UpperCamelCase__ :Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCamelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
UpperCamelCase__ :Union[str, Any] = table_cast(lowerCamelCase__ , lowerCamelCase__ )
return pa_table
def __a ( self :List[Any] , lowerCamelCase__ :Any ):
UpperCamelCase__ :Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
UpperCamelCase__ :Optional[int] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCamelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase__ ) ):
UpperCamelCase__ :Optional[int] = pd.read_csv(lowerCamelCase__ , iterator=lowerCamelCase__ , dtype=lowerCamelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Any = pa.Table.from_pandas(lowerCamelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCamelCase__ )}: {e}""" )
raise | 383 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase = logging.getLogger()
def A ( lowercase__ : str ) -> str:
UpperCamelCase__ :int = {}
UpperCamelCase__ :List[str] = os.path.join(lowercase__ , """all_results.json""" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , """r""" ) as f:
UpperCamelCase__ :List[Any] = json.load(lowercase__ )
else:
raise ValueError(f"""can't find {path}""" )
return results
UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :Dict ):
import xla_spawn
UpperCamelCase__ :Optional[int] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :int = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
UpperCamelCase__ :Any = time()
xla_spawn.main()
UpperCamelCase__ :Optional[Any] = time()
UpperCamelCase__ :Optional[Any] = get_results(lowerCamelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def __a ( self :Union[str, Any] ):
import xla_spawn
UpperCamelCase__ :List[str] = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
xla_spawn.main() | 383 | 1 |
def lowerCAmelCase_ ( lowerCamelCase = 1000 ):
__magic_name__ : Optional[int] =2**power
__magic_name__ : int =0
while n:
__magic_name__ , __magic_name__ : List[str] =r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 21 |
UpperCAmelCase_ : int = range(2, 20 + 1)
UpperCAmelCase_ : Tuple = [10**k for k in range(ks[-1] + 1)]
UpperCAmelCase_ : dict[int, dict[int, list[list[int]]]] = {}
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
__magic_name__ : Any =sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
__magic_name__ , __magic_name__ : Tuple =0, 0
__magic_name__ : Optional[Any] =n - i
__magic_name__ : Union[str, Any] =memo.get(lowerCamelCase )
if sub_memo is not None:
__magic_name__ : int =sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
__magic_name__ : Dict =-1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__magic_name__ : Optional[Any] =_k
break
if max_jump >= 0:
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =jumps[max_jump]
# since the difference between jumps is cached, add c
__magic_name__ : Tuple =diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
__magic_name__ , __magic_name__ : Tuple =divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__magic_name__ : str =[]
else:
__magic_name__ : List[str] ={c: []}
__magic_name__ : List[str] =sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__magic_name__ , __magic_name__ : Union[str, Any] =next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__magic_name__ , __magic_name__ : Optional[int] =compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
__magic_name__ : Tuple =sub_memo[c]
# keep jumps sorted by # of terms skipped
__magic_name__ : List[Any] =0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__magic_name__ : Tuple =i
__magic_name__ , __magic_name__ , __magic_name__ : Tuple =0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__magic_name__ : Optional[Any] =ds_c + ds_b
diff += addend
__magic_name__ : str =0
for j in range(lowerCamelCase ):
__magic_name__ : int =a_i[j] + addend
__magic_name__ , __magic_name__ : Any =divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
__magic_name__ : Tuple =digits[j] + addend
if s >= 10:
__magic_name__ , __magic_name__ : int =divmod(lowerCamelCase , 10 )
__magic_name__ : int =addend // 10 + quotient
else:
__magic_name__ : Dict =s
__magic_name__ : Any =addend // 10
if addend == 0:
break
while addend > 0:
__magic_name__ , __magic_name__ : Union[str, Any] =divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase = 10**15 ):
__magic_name__ : List[str] =[1]
__magic_name__ : str =1
__magic_name__ : str =0
while True:
__magic_name__ , __magic_name__ : List[str] =next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
__magic_name__ : int =0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a__ ( A__ ):
UpperCAmelCase__ = '''vision-encoder-decoder'''
UpperCAmelCase__ = True
def __init__( self :List[Any] , **_lowerCamelCase :List[Any] ):
'''simple docstring'''
super().__init__(**_lowerCamelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'''A configuraton of type {self.model_type} cannot be instantiated because '''
f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
UpperCamelCase_ : List[Any] =kwargs.pop('encoder' )
UpperCamelCase_ : List[Any] =encoder_config.pop('model_type' )
UpperCamelCase_ : Tuple =kwargs.pop('decoder' )
UpperCamelCase_ : List[str] =decoder_config.pop('model_type' )
UpperCamelCase_ : Any =AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_ : Optional[int] =AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_ : List[str] =True
@classmethod
def lowerCamelCase_ ( cls :int , _lowerCamelCase :PretrainedConfig , _lowerCamelCase :PretrainedConfig , **_lowerCamelCase :Any ):
'''simple docstring'''
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
UpperCamelCase_ : List[Any] =True
UpperCamelCase_ : List[Any] =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : Dict =copy.deepcopy(self.__dict__ )
UpperCamelCase_ : Union[str, Any] =self.encoder.to_dict()
UpperCamelCase_ : Dict =self.decoder.to_dict()
UpperCamelCase_ : List[str] =self.__class__.model_type
return output
class a__ ( A__ ):
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class a__ ( A__ ):
@property
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : int =OrderedDict()
UpperCamelCase_ : Optional[Any] ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ : Any ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
UpperCamelCase_ : Optional[Any] ={0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :"PreTrainedTokenizerBase" , _lowerCamelCase :int = -1 , _lowerCamelCase :int = -1 , _lowerCamelCase :bool = False , _lowerCamelCase :Optional["TensorType"] = None , ):
'''simple docstring'''
import torch
UpperCamelCase_ : Union[str, Any] =OrderedDict()
UpperCamelCase_ : List[str] =super().generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
UpperCamelCase_ : Optional[int] =dummy_input['input_ids'].shape
UpperCamelCase_ : Optional[Any] =(batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase_ : Tuple =dummy_input.pop('input_ids' )
UpperCamelCase_ : List[Any] =dummy_input.pop('attention_mask' )
UpperCamelCase_ : str =torch.zeros(_lowerCamelCase )
return common_inputs
class a__ ( A__ ):
@property
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self :int , _lowerCamelCase :PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :PretrainedConfig , _lowerCamelCase :PretrainedConfig , _lowerCamelCase :str = "default" ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 715 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a__ ( A__ ):
def __init__( self :List[str] , *_lowerCamelCase :int , **_lowerCamelCase :str ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 395 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , __snake_case : str , __snake_case : List[str]=3 , __snake_case : Optional[int]=32 , __snake_case : List[Any]=3 , __snake_case : Dict=10 , __snake_case : Tuple=[10, 20, 30, 40] , __snake_case : str=[1, 1, 2, 1] , __snake_case : Any=True , __snake_case : Any=True , __snake_case : List[Any]="relu" , __snake_case : Union[str, Any]=3 , __snake_case : List[str]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(__snake_case )
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase_ ( self : List[Any] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase_ ( self : int , __snake_case : Optional[int] , __snake_case : str ):
UpperCAmelCase_ = FlaxRegNetModel(config=__snake_case )
UpperCAmelCase_ = model(__snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Any , __snake_case : List[Any] , __snake_case : List[str] ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=__snake_case )
UpperCAmelCase_ = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : str = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCAmelCase : Tuple = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = FlaxRegNetModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def lowerCamelCase_ ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : Union[str, Any] ):
return
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowerCamelCase_ ( self : str ):
pass
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__snake_case )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCamelCase_ ( self : str ):
def check_hidden_states_output(__snake_case : List[str] , __snake_case : Tuple , __snake_case : str ):
UpperCAmelCase_ = model_class(__snake_case )
UpperCAmelCase_ = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__snake_case , __snake_case )
UpperCAmelCase_ = model_class(__snake_case )
@jax.jit
def model_jitted(__snake_case : Dict , **__snake_case : str ):
return model(pixel_values=__snake_case , **__snake_case )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = model_jitted(**__snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( ) -> str:
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self : Optional[int] ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__snake_case , return_tensors='''np''' )
UpperCAmelCase_ = model(**__snake_case )
# verify the logits
UpperCAmelCase_ = (1, 10_00)
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( __lowerCAmelCase : int , __lowerCAmelCase : bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
_UpperCamelCase : List[str] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
_UpperCamelCase : int = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__lowerCAmelCase , 1 ):
if n < _p:
# then we have our last prime to check
_UpperCamelCase : Optional[int] = primes[:idx]
break
_UpperCamelCase , _UpperCamelCase : Any = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
_UpperCamelCase : List[str] = False
for r in range(__lowerCAmelCase ):
_UpperCamelCase : List[Any] = pow(__lowerCAmelCase , d * 2**r , __lowerCAmelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
_UpperCamelCase : Union[str, Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ) -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 239 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """data2vec-text"""
def __init__(self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : Dict = hidden_size
_UpperCamelCase : List[Any] = num_hidden_layers
_UpperCamelCase : Tuple = num_attention_heads
_UpperCamelCase : Any = hidden_act
_UpperCamelCase : Any = intermediate_size
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Dict = max_position_embeddings
_UpperCamelCase : List[Any] = type_vocab_size
_UpperCamelCase : List[Any] = initializer_range
_UpperCamelCase : Dict = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : List[Any] = use_cache
_UpperCamelCase : str = classifier_dropout
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def lowercase_ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCamelCase : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 239 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a : List[Any] = logging.get_logger(__name__)
a : str = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class a ( lowercase__ ):
"""simple docstring"""
a : Any = 'imagegpt'
a : Optional[Any] = ['past_key_values']
a : Tuple = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Union[str, Any] , __lowercase : Dict=512 + 1 , __lowercase : int=32 * 32 , __lowercase : List[str]=512 , __lowercase : Dict=24 , __lowercase : Optional[int]=8 , __lowercase : Tuple=None , __lowercase : Tuple="quick_gelu" , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : List[Any]=1e-5 , __lowercase : Optional[Any]=0.02 , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=True , __lowercase : Optional[int]=False , __lowercase : Union[str, Any]=False , __lowercase : Any=False , **__lowercase : Optional[Any] , ) -> List[Any]:
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Union[str, Any] = n_positions
__UpperCAmelCase : int = n_embd
__UpperCAmelCase : int = n_layer
__UpperCAmelCase : Tuple = n_head
__UpperCAmelCase : Tuple = n_inner
__UpperCAmelCase : Optional[Any] = activation_function
__UpperCAmelCase : Tuple = resid_pdrop
__UpperCAmelCase : Optional[int] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : int = layer_norm_epsilon
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : List[str] = scale_attn_weights
__UpperCAmelCase : int = use_cache
__UpperCAmelCase : Optional[int] = scale_attn_by_inverse_layer_idx
__UpperCAmelCase : Optional[int] = reorder_and_upcast_attn
__UpperCAmelCase : Optional[int] = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowercase , **__lowercase )
class a ( lowercase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : "FeatureExtractionMixin" , __lowercase : int = 1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional["TensorType"] = None , __lowercase : int = 3 , __lowercase : int = 32 , __lowercase : int = 32 , ) -> Mapping[str, Any]:
__UpperCAmelCase : str = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase )
__UpperCAmelCase : Any = dict(preprocessor(images=__lowercase , return_tensors=__lowercase ) )
return inputs
| 63 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = 'T5Config'
def a__ ( lowercase : jnp.array, lowercase : int, lowercase : int ) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = jnp.zeros_like(lowercase )
_UpperCamelCase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
_UpperCamelCase = shifted_input_ids.at[:, 0].set(lowercase )
_UpperCamelCase = jnp.where(shifted_input_ids == -100, lowercase, lowercase )
return shifted_input_ids
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[Any] = 'mt5'
_snake_case : Union[str, Any] = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Tuple = 'mt5'
_snake_case : int = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = 'mt5'
_snake_case : Optional[Any] = MTaConfig
| 98 | 0 |
import torch
from diffusers import StableDiffusionPipeline
lowercase_ = 'path-to-your-trained-model'
lowercase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('cuda')
lowercase_ = 'A photo of sks dog in a bucket'
lowercase_ = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 380 |
from math import pow, sqrt
def a ( *A__ : float ) -> bool:
"""simple docstring"""
_lowercase =len(A__ ) > 0 and all(value > 0.0 for value in values )
return result
def a ( A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def a ( A__ : float , A__ : float , A__ : float ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(A__ , A__ , A__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 380 | 1 |
from math import factorial
def a__ ( _UpperCamelCase : int = 1_00 ):
return sum(int(snake_case__ ) for x in str(factorial(snake_case__ ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 175 | from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = SMALL_MODEL_IDENTIFIER
lowerCAmelCase = '''pt'''
lowerCAmelCase = '''tf'''
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=__SCREAMING_SNAKE_CASE )
model_tf.save_pretrained(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase = FeaturesManager.determine_framework(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_tf )
# Both in environment -> use PyTorch
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__SCREAMING_SNAKE_CASE , self.framework_pt )
# Both not in environment -> raise error
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = MagicMock(return_value=__SCREAMING_SNAKE_CASE )
with patch('''transformers.onnx.features.is_tf_available''' , __SCREAMING_SNAKE_CASE ), patch(
'''transformers.onnx.features.is_torch_available''' , __SCREAMING_SNAKE_CASE ):
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 312 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self ,UpperCamelCase ,UpperCamelCase=7 ,UpperCamelCase=3 ,UpperCamelCase=18 ,UpperCamelCase=30 ,UpperCamelCase=400 ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=True ,) -> List[str]:
snake_case__ :Optional[Any] = size if size is not None else {"height": 18, "width": 18}
snake_case__ :Dict = parent
snake_case__ :str = batch_size
snake_case__ :int = num_channels
snake_case__ :Optional[Any] = image_size
snake_case__ :str = min_resolution
snake_case__ :Optional[Any] = max_resolution
snake_case__ :List[str] = do_resize
snake_case__ :Dict = size
snake_case__ :List[str] = apply_ocr
def lowerCAmelCase_ ( self ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( _A , unittest.TestCase ):
_A = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Dict = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase ,"do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase ,"size" ) )
self.assertTrue(hasattr(UpperCamelCase ,"apply_ocr" ) )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 18, "width": 18} )
snake_case__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
def lowerCAmelCase_ ( self ) -> int:
pass
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ :Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,Image.Image )
# Test not batched input
snake_case__ :Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
self.assertIsInstance(encoding.words ,UpperCamelCase )
self.assertIsInstance(encoding.boxes ,UpperCamelCase )
# Test batched
snake_case__ :Union[str, Any] = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
snake_case__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ :Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,np.ndarray )
# Test not batched input
snake_case__ :Union[str, Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
# Test batched
snake_case__ :Dict = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,torch.Tensor )
# Test not batched input
snake_case__ :Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
# Test batched
snake_case__ :int = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> List[str]:
# with apply_OCR = True
snake_case__ :str = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case__ :Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" ,split="test" )
snake_case__ :str = Image.open(ds[0]["file"] ).convert("RGB" )
snake_case__ :List[Any] = image_processing(UpperCamelCase ,return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case__ :str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case__ :Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,UpperCamelCase )
self.assertListEqual(encoding.boxes ,UpperCamelCase )
# with apply_OCR = False
snake_case__ :Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase )
snake_case__ :Union[str, Any] = image_processing(UpperCamelCase ,return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) ) | 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :Any = False
snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
snake_case__ :Tuple = TaConfig(
vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
snake_case__ :List[str] = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
snake_case__ :List[Any] = TaBlock(UpperCamelCase )
self.encoders.append(UpperCamelCase )
snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :str = self.token_embedder(UpperCamelCase )
snake_case__ :int = encoder_input_tokens.shape[1]
snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase )
snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
# inverted the attention mask
snake_case__ :Optional[Any] = encoder_input_tokens.size()
snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
for lyr in self.encoders:
snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
return self.dropout_post(UpperCamelCase ), encoder_inputs_mask | 57 | 0 |
'''simple docstring'''
from __future__ import annotations
class a_ :
def __init__( self : Optional[Any] , lowercase : str , lowercase : str ):
"""simple docstring"""
lowercase_ , lowercase_ :int = text, pattern
lowercase_ , lowercase_ :Any = len(lowercase ), len(lowercase )
def lowercase__ ( self : Optional[int] , lowercase : str ):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowercase__ ( self : int , lowercase : int ):
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ :Optional[Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowercase_ :Union[str, Any] = self.mismatch_in_text(lowercase )
if mismatch_index == -1:
positions.append(lowercase )
else:
lowercase_ :Tuple = self.match_in_pattern(self.text[mismatch_index] )
lowercase_ :List[str] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase : List[Any] ='''ABAABA'''
lowerCAmelCase : Any ='''AB'''
lowerCAmelCase : Optional[Any] =BoyerMooreSearch(text, pattern)
lowerCAmelCase : Any =bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 172 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowerCamelCase : int ):
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
lowercase_ :Optional[int] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowercase_ :Optional[int] = 1
if upper_limit > 0:
lowercase_ :str = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 ,upper_limit + 1 ):
for j in range(__lowerCamelCase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCAmelCase : Optional[int] =int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 172 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class __A ( _SCREAMING_SNAKE_CASE ):
lowerCamelCase =field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase =Features({'''image''': Image()} )
lowerCamelCase =Features({'''labels''': ClassLabel} )
lowerCamelCase ='''image'''
lowerCamelCase ='''labels'''
def lowercase_( self : int , lowerCamelCase : int ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowerCamelCase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__A : Optional[Any] = copy.deepcopy(self )
__A : int = self.label_schema.copy()
__A : List[str] = features[self.label_column]
__A : Dict = label_schema
return task_template
@property
def lowercase_( self : Optional[Any] ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 704 |
'''simple docstring'''
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__A : Tuple = generate_pascal_triangle(__SCREAMING_SNAKE_CASE )
for row_idx in range(__SCREAMING_SNAKE_CASE ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__A : list[list[int]] = []
for current_row_idx in range(__SCREAMING_SNAKE_CASE ):
__A : Tuple = populate_current_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
triangle.append(__SCREAMING_SNAKE_CASE )
return triangle
def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
__A : Union[str, Any] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__A , __A : Tuple = 1, 1
for current_col_idx in range(1 , __SCREAMING_SNAKE_CASE ):
calculate_current_element(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return current_row
def A_ ( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , ) -> None:
"""simple docstring"""
__A : str = triangle[current_row_idx - 1][current_col_idx - 1]
__A : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx]
__A : Any = above_to_left_elt + above_to_right_elt
def A_ ( __SCREAMING_SNAKE_CASE : int ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
__A : list[list[int]] = [[1]]
for row_index in range(1 , __SCREAMING_SNAKE_CASE ):
__A : Optional[Any] = [0] + result[-1] + [0]
__A : Optional[Any] = row_index + 1
# Calculate the number of distinct elements in a row
__A : int = sum(divmod(__SCREAMING_SNAKE_CASE , 2 ) )
__A : str = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__A : Tuple = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__A : Any = row_first_half + row_second_half
result.append(__SCREAMING_SNAKE_CASE )
return result
def A_ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None:
__A : List[Any] = F"{func.__name__}({value})"
__A : Dict = timeit(F"__main__.{call}" , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 499 | 0 |
"""simple docstring"""
_snake_case = {str(digit): digit**5 for digit in range(1_0)}
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(SCREAMING_SNAKE_CASE ) )
def __snake_case ( ):
"""simple docstring"""
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
print(solution())
| 580 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Dict=0 , ) -> int:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
_lowerCAmelCase = projection_dim
def __lowerCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
_lowerCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> int:
"""simple docstring"""
_lowerCAmelCase = TFDPRContextEncoder(config=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ) -> int:
"""simple docstring"""
_lowerCAmelCase = TFDPRQuestionEncoder(config=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = TFDPRReader(config=UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_: List[str] = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: Optional[Any] = False
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: List[str] = False
SCREAMING_SNAKE_CASE_: List[str] = False
def __lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_lowerCAmelCase = TFDPRModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase_ )
@slow
def __lowerCamelCase ( self : str ) -> str:
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFDPRContextEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFDPRReader.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
_lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_lowerCAmelCase = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_lowerCAmelCase = model(UpperCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowerCAmelCase = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 580 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def SCREAMING_SNAKE_CASE ( snake_case):
return 1.0 / (1.0 + np.exp(-_outputs))
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = np.max(_outputs, axis=-1, keepdims=snake_case)
__snake_case = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=snake_case)
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = '''sigmoid'''
UpperCamelCase_ : Any = '''softmax'''
UpperCamelCase_ : List[str] = '''none'''
@add_end_docstrings(
_UpperCAmelCase , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class _A ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : Tuple = ClassificationFunction.NONE
def __init__( self : Union[str, Any] , **A_ : int ) -> Tuple:
super().__init__(**A_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowercase ( self : str , A_ : int=None , A_ : str=None , A_ : Any="" , **A_ : Union[str, Any] ) -> str:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
__snake_case = tokenizer_kwargs
__snake_case = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
__snake_case = self.model.config.return_all_scores
if isinstance(A_ , A_ ) or top_k is None:
__snake_case = top_k
__snake_case = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , A_ , )
if return_all_scores:
__snake_case = None
else:
__snake_case = 1
if isinstance(A_ , A_ ):
__snake_case = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__snake_case = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Any , *A_ : Optional[int] , **A_ : Tuple ) -> Optional[int]:
__snake_case = super().__call__(*A_ , **A_ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__snake_case = '''top_k''' not in kwargs
if isinstance(args[0] , A_ ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowercase ( self : Optional[Any] , A_ : Tuple , **A_ : Optional[Any] ) -> Dict[str, GenericTensor]:
__snake_case = self.framework
if isinstance(A_ , A_ ):
return self.tokenizer(**A_ , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ) and len(A_ ) == 1 and isinstance(inputs[0] , A_ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A_ , **A_ )
elif isinstance(A_ , A_ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(A_ , return_tensors=A_ , **A_ )
def lowercase ( self : Optional[int] , A_ : Optional[Any] ) -> Optional[int]:
return self.model(**A_ )
def lowercase ( self : Any , A_ : str , A_ : Dict=None , A_ : Tuple=1 , A_ : Optional[Any]=True ) -> Union[str, Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__snake_case = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__snake_case = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
__snake_case = self.model.config.function_to_apply
else:
__snake_case = ClassificationFunction.NONE
__snake_case = model_outputs['''logits'''][0]
__snake_case = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__snake_case = sigmoid(A_ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__snake_case = softmax(A_ )
elif function_to_apply == ClassificationFunction.NONE:
__snake_case = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__snake_case = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(A_ )
]
if not _legacy:
dict_scores.sort(key=lambda A_ : x["score"] , reverse=A_ )
if top_k is not None:
__snake_case = dict_scores[:top_k]
return dict_scores | 709 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase : Dict = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__lowercase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure) | 93 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def a__ ( lowercase__ ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
UpperCAmelCase_ =precision
UpperCAmelCase_ =ceil(precision / 1_4 )
UpperCAmelCase_ =4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
UpperCAmelCase_ =1
UpperCAmelCase_ =1_3_5_9_1_4_0_9
UpperCAmelCase_ =Decimal(lowercase__ )
for k in range(1 , lowercase__ ):
UpperCAmelCase_ =factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__lowercase : List[str] =50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 54 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowercase ( A , A , unittest.TestCase ):
__magic_name__ : List[str] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ : Optional[Any] = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ : Optional[int] = False
__magic_name__ : List[Any] = False
def lowerCAmelCase_ ( self , a__ , a__ , a__=False ) -> Union[str, Any]:
'''simple docstring'''
A_ = super()._prepare_for_class(a__ , a__ , return_labels=a__ )
if return_labels:
if model_class in get_values(a__ ):
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __lowercase ( A ):
def __init__( self , a__ , a__=1_3 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=9_9 , a__=3_2 , a__=3_2 , a__=2 , a__=4 , a__=3_7 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=1_6 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ) -> List[str]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = embedding_size
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
A_ = TFMobileBertModel(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
A_ = [input_ids, input_mask]
A_ = model(a__ )
A_ = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
A_ = TFMobileBertForMaskedLM(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> int:
'''simple docstring'''
A_ = TFMobileBertForNextSentencePrediction(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
A_ = TFMobileBertForPreTraining(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFMobileBertForSequenceClassification(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
A_ = self.num_choices
A_ = TFMobileBertForMultipleChoice(config=a__ )
A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(a__ , 1 ) , (1, self.num_choices, 1) )
A_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFMobileBertForTokenClassification(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
A_ = TFMobileBertForQuestionAnswering(config=a__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
A_ = ConfigTester(self , config_class=a__ , hidden_size=3_7 )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*a__ )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*a__ )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a__ )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a__ )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*a__ )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*a__ )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a__ )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*a__ )
@slow
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
A_ = TFMobileBertModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(a__ )[0]
A_ = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , a__ )
A_ = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-4 ) | 141 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = '\\n Text data.\n Second line of data.'
__SCREAMING_SNAKE_CASE : List[str] = 'file'
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / """cache"""
snake_case_ = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = """custom_cache"""
snake_case_ = """custom_extracted_dir"""
snake_case_ = tmp_path / """custom_extracted_path"""
if default_extracted:
snake_case_ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
snake_case_ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
# absolute path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
snake_case_ = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
# absolute path
snake_case_ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
snake_case_ = """./__missing_file__.txt"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( ) -> Tuple:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("""s3://huggingface.co""" )
| 2 |
"""simple docstring"""
from functools import reduce
__SCREAMING_SNAKE_CASE : Tuple = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def _a ( _SCREAMING_SNAKE_CASE = N ) -> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str(int(_SCREAMING_SNAKE_CASE ) * int(_SCREAMING_SNAKE_CASE ) ) , n[i : i + 13] ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCamelCase__ ( _a ):
a : Optional[int] = """beit"""
def __init__( self : int , A_ : List[str]=8_1_9_2 , A_ : Tuple=7_6_8 , A_ : str=1_2 , A_ : Optional[Any]=1_2 , A_ : List[str]=3_0_7_2 , A_ : Any="gelu" , A_ : List[Any]=0.0 , A_ : Optional[int]=0.0 , A_ : List[str]=0.02 , A_ : Tuple=1e-1_2 , A_ : str=2_2_4 , A_ : str=1_6 , A_ : Any=3 , A_ : str=False , A_ : List[str]=False , A_ : Any=False , A_ : Dict=False , A_ : Optional[Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Optional[Any]=True , A_ : Any=[3, 5, 7, 1_1] , A_ : Dict=[1, 2, 3, 6] , A_ : List[str]=True , A_ : Optional[int]=0.4 , A_ : Optional[int]=2_5_6 , A_ : int=1 , A_ : Tuple=False , A_ : List[Any]=2_5_5 , **A_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**A_ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = use_mask_token
__lowercase = use_absolute_position_embeddings
__lowercase = use_relative_position_bias
__lowercase = use_shared_relative_position_bias
__lowercase = layer_scale_init_value
__lowercase = drop_path_rate
__lowercase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowercase = out_indices
__lowercase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowercase = use_auxiliary_head
__lowercase = auxiliary_loss_weight
__lowercase = auxiliary_channels
__lowercase = auxiliary_num_convs
__lowercase = auxiliary_concat_input
__lowercase = semantic_loss_ignore_index
class lowerCamelCase__ ( _a ):
a : int = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return 1e-4
| 616 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCAmelCase_ ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(UpperCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def lowerCAmelCase_ ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def lowerCAmelCase_ ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(UpperCamelCase__ ):
http_head("""https://huggingface.co""" )
| 616 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=_UpperCAmelCase ):
_A : Tuple = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : int ):
requires_backends(self ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : List[str] ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : int ):
requires_backends(cls ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : str ):
requires_backends(cls ,["flax", "transformers"] )
class _snake_case ( metaclass=_UpperCAmelCase ):
_A : List[str] = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Any ):
requires_backends(self ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Tuple ):
requires_backends(cls ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : str ):
requires_backends(cls ,["flax", "transformers"] )
class _snake_case ( metaclass=_UpperCAmelCase ):
_A : Any = ['''flax''', '''transformers''']
def __init__( self : str ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
requires_backends(self ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : int ,*SCREAMING_SNAKE_CASE__ : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Any ):
requires_backends(cls ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : str ):
requires_backends(cls ,["flax", "transformers"] )
class _snake_case ( metaclass=_UpperCAmelCase ):
_A : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self : str ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
requires_backends(self ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any] ):
requires_backends(cls ,["flax", "transformers"] )
@classmethod
def __UpperCamelCase ( cls : List[str] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
requires_backends(cls ,["flax", "transformers"] )
| 708 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def A_ ( snake_case ):
@wraps(snake_case )
def _inner_fn(*snake_case , **snake_case ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , snake_case , )
return fn(*snake_case , **snake_case )
return _inner_fn
| 465 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __magic_name__ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] ,_UpperCAmelCase : Dict[str, int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : int = None ,_UpperCAmelCase : int = None ):
super().__init__()
_a : int = pad_token_id
_a : Tuple = max_length
_a : Any = vocab
_a : Optional[int] = merges
_a : Dict = BytePairTokenizer(_UpperCAmelCase ,_UpperCAmelCase ,sequence_length=_UpperCAmelCase )
@classmethod
def __lowercase ( cls : Tuple ,_UpperCAmelCase : GPTaTokenizer ,*_UpperCAmelCase : List[str] ,**_UpperCAmelCase : int ):
_a : Union[str, Any] = [' '.join(_UpperCAmelCase ) for m in tokenizer.bpe_ranks.keys()]
_a : Any = tokenizer.get_vocab()
return cls(_UpperCAmelCase ,_UpperCAmelCase ,*_UpperCAmelCase ,**_UpperCAmelCase )
@classmethod
def __lowercase ( cls : int ,_UpperCAmelCase : Union[str, os.PathLike] ,*_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Optional[Any] ):
_a : Dict = GPTaTokenizer.from_pretrained(_UpperCAmelCase ,*_UpperCAmelCase ,**_UpperCAmelCase )
return cls.from_tokenizer(_UpperCAmelCase ,*_UpperCAmelCase ,**_UpperCAmelCase )
@classmethod
def __lowercase ( cls : Dict ,_UpperCAmelCase : Any ):
return cls(**_UpperCAmelCase )
def __lowercase ( self : int ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int = None ):
_a : List[str] = self.tf_tokenizer(_UpperCAmelCase )
_a : Any = tf.ones_like(_UpperCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
_a : Dict = max_length if max_length is not None else self.max_length
if max_length is not None:
_a , _a : int = pad_model_inputs(
_UpperCAmelCase ,max_seq_length=_UpperCAmelCase ,pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 358 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
@property
def __lowercase ( self : int ):
torch.manual_seed(0 )
_a : int = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
@property
def __lowercase ( self : Any ):
torch.manual_seed(0 )
_a : int = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
return model
@property
def __lowercase ( self : Union[str, Any] ):
torch.manual_seed(0 )
_a : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Union[str, Any] = self.dummy_uncond_unet
_a : Union[str, Any] = DDIMScheduler()
_a : Union[str, Any] = self.dummy_vq_model
_a : List[Any] = LDMPipeline(unet=_UpperCAmelCase ,vqvae=_UpperCAmelCase ,scheduler=_UpperCAmelCase )
ldm.to(_UpperCAmelCase )
ldm.set_progress_bar_config(disable=_UpperCAmelCase )
_a : Dict = torch.manual_seed(0 )
_a : int = ldm(generator=_UpperCAmelCase ,num_inference_steps=2 ,output_type='numpy' ).images
_a : Tuple = torch.manual_seed(0 )
_a : List[Any] = ldm(generator=_UpperCAmelCase ,num_inference_steps=2 ,output_type='numpy' ,return_dict=_UpperCAmelCase )[0]
_a : Any = image[0, -3:, -3:, -1]
_a : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Union[str, Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_a : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
_a : Dict = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(_UpperCAmelCase )
ldm.set_progress_bar_config(disable=_UpperCAmelCase )
_a : str = torch.manual_seed(0 )
_a : Optional[Any] = ldm(generator=_UpperCAmelCase ,num_inference_steps=5 ,output_type='numpy' ).images
_a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Union[str, Any] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_a : List[str] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 358 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase( __UpperCAmelCase , unittest.TestCase ):
lowercase__ = BioGptTokenizer
lowercase__ = False
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_UpperCamelCase = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_))))
_UpperCamelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''') as fp:
fp.write(json.dumps(lowerCAmelCase_))
with open(self.merges_file , '''w''') as fp:
fp.write('''\n'''.join(lowerCAmelCase_))
def UpperCAmelCase ( self , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = '''lower newer'''
return input_text, output_text
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer(self.vocab_file , self.merges_file)
_UpperCamelCase = '''lower'''
_UpperCamelCase = ['''low''', '''er</w>''']
_UpperCamelCase = tokenizer.tokenize(lowerCAmelCase_)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_)
_UpperCamelCase = tokens + ['''<unk>''']
_UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_) , lowerCAmelCase_)
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase_)
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase_)
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_)
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 711 |
"""simple docstring"""
import json
import sys
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]:
"""simple docstring"""
with open(__snake_case, encoding='''utf-8''' ) as f:
_UpperCamelCase = json.load(__snake_case )
_UpperCamelCase = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
for benchmark_name in sorted(__snake_case ):
_UpperCamelCase = results[benchmark_name]
_UpperCamelCase = benchmark_name.split('''/''' )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
_UpperCamelCase = '''| metric |'''
_UpperCamelCase = '''|--------|'''
_UpperCamelCase = '''| new / old (diff) |'''
for metric_name in sorted(__snake_case ):
_UpperCamelCase = benchmark_res[metric_name]
_UpperCamelCase = metric_vals['''new''']
_UpperCamelCase = metric_vals.get('''old''', __snake_case )
_UpperCamelCase = metric_vals.get('''diff''', __snake_case )
_UpperCamelCase = F''' {new_val:f}''' if isinstance(__snake_case, (int, float) ) else '''None'''
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(__snake_case, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(__snake_case, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('''</details>''' )
with open(__snake_case, '''w''', encoding='''utf-8''' ) as f:
f.writelines('''\n'''.join(__snake_case ) )
if __name__ == "__main__":
_a = sys.argv[1]
_a = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 78 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = "▁"
UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Tuple = BigBirdTokenizer
__A : List[str] = BigBirdTokenizerFast
__A : List[Any] = True
__A : List[Any] = True
def UpperCamelCase( self ):
super().setUp()
_UpperCAmelCase = self.tokenizer_class(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ):
_UpperCAmelCase = '''<s>'''
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCamelCase ) , 1004 )
def UpperCamelCase( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase( self ):
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
_UpperCAmelCase = tokenizer.tokenize(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(_UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = BigBirdTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
_UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = '''Hello World!'''
_UpperCAmelCase = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
_UpperCAmelCase = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def UpperCamelCase( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCAmelCase = ''' '''.join(_UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCamelCase )
_UpperCAmelCase = BigBirdConfig(attention_type='''original_full''' )
_UpperCAmelCase = BigBirdModel(_UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
_UpperCAmelCase = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def UpperCamelCase( self ):
# fmt: off
_UpperCAmelCase = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , ) | 32 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A__ : Union[str, Any] = random.Random()
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any]=1.0 ,__UpperCamelCase : Dict=None ,__UpperCamelCase : List[Any]=None ):
if rng is None:
lowerCAmelCase_ : List[Any] = global_rng
lowerCAmelCase_ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __snake_case ( unittest.TestCase ):
def __init__( self : Union[str, Any] , A_ : Dict , A_ : Optional[Any]=7 , A_ : Union[str, Any]=4_0_0 , A_ : Dict=2_0_0_0 , A_ : Dict=1 , A_ : Optional[Any]=0.0 , A_ : Tuple=1_6_0_0_0 , A_ : Any=True , A_ : Any=8_0 , A_ : str=1_6 , A_ : Union[str, Any]=6_4 , A_ : List[Any]="hann_window" , A_ : Union[str, Any]=8_0 , A_ : Dict=7_6_0_0 , A_ : List[str]=1e-10 , A_ : Union[str, Any]=True , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : str = min_seq_length
lowerCAmelCase_ : Optional[int] = max_seq_length
lowerCAmelCase_ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase_ : List[Any] = feature_size
lowerCAmelCase_ : Any = padding_value
lowerCAmelCase_ : List[Any] = sampling_rate
lowerCAmelCase_ : str = do_normalize
lowerCAmelCase_ : str = num_mel_bins
lowerCAmelCase_ : List[str] = hop_length
lowerCAmelCase_ : Tuple = win_length
lowerCAmelCase_ : Tuple = win_function
lowerCAmelCase_ : Optional[int] = fmin
lowerCAmelCase_ : List[str] = fmax
lowerCAmelCase_ : Optional[Any] = mel_floor
lowerCAmelCase_ : Optional[Any] = return_attention_mask
def UpperCAmelCase__ ( self : List[str]):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCAmelCase__ ( self : Optional[int] , A_ : Any=False , A_ : List[str]=False):
def _flatten(A_ : Tuple):
return list(itertools.chain(*A_))
if equal_length:
lowerCAmelCase_ : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Dict = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
lowerCAmelCase_ : Optional[Any] = [np.asarray(A_) for x in speech_inputs]
return speech_inputs
def UpperCAmelCase__ ( self : Tuple , A_ : Optional[Any]=False , A_ : Dict=False):
if equal_length:
lowerCAmelCase_ : Union[str, Any] = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Tuple = [
floats_list((x, self.num_mel_bins))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
lowerCAmelCase_ : Optional[int] = [np.asarray(A_) for x in speech_inputs]
return speech_inputs
@require_torch
class __snake_case ( UpperCamelCase_ ,unittest.TestCase ):
_a = SpeechTaFeatureExtractor
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Dict = SpeechTaFeatureExtractionTester(self)
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int):
self.assertTrue(np.all(np.mean(A_ , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(A_ , axis=0) - 1) < 1e-3))
def UpperCAmelCase__ ( self : List[Any]):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : Optional[Any] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : Any = [np.asarray(A_) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase_ : Tuple = feat_extract(speech_inputs[0] , return_tensors='''np''').input_values
lowerCAmelCase_ : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''').input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3))
# Test batched
lowerCAmelCase_ : Any = feat_extract(A_ , return_tensors='''np''').input_values
lowerCAmelCase_ : Optional[int] = feat_extract(A_ , return_tensors='''np''').input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3))
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
lowerCAmelCase_ : Optional[Any] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : Union[str, Any] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase_ : str = [None, 1_6_0_0, None]
for max_length, padding in zip(A_ , A_):
lowerCAmelCase_ : str = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''')
lowerCAmelCase_ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0])
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0])
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0])
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
lowerCAmelCase_ : Dict = range(8_0_0 , 1_4_0_0 , 2_0_0)
lowerCAmelCase_ : Union[str, Any] = [floats_list((1, x))[0] for x in lengths]
lowerCAmelCase_ : List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase_ : Optional[Any] = [None, 1_6_0_0, None]
for max_length, padding in zip(A_ , A_):
lowerCAmelCase_ : Optional[int] = feat_extract(A_ , max_length=A_ , padding=A_)
lowerCAmelCase_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0])
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0])
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
lowerCAmelCase_ : List[Any] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : int = feat_extract(
A_ , truncation=A_ , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''')
lowerCAmelCase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
lowerCAmelCase_ : str = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : Any = feat_extract(
A_ , truncation=A_ , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''')
lowerCAmelCase_ : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0))
lowerCAmelCase_ : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : List[Any] = feat_extract(
A_ , truncation=A_ , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''')
lowerCAmelCase_ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0])
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0))
def UpperCAmelCase__ ( self : Dict):
lowerCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
lowerCAmelCase_ : List[str] = np.random.rand(1_0_0).astype(np.floataa)
lowerCAmelCase_ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : Optional[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
lowerCAmelCase_ : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def UpperCAmelCase__ ( self : Optional[int]):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : str = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
lowerCAmelCase_ : Optional[int] = [np.asarray(A_) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase_ : Optional[Any] = feature_extractor(audio_target=A_ , padding=A_ , return_tensors='''np''').input_values
self.assertTrue(input_values.ndim == 3)
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
# Test not batched input
lowerCAmelCase_ : int = feature_extractor(speech_inputs[0] , return_tensors='''np''').input_values
lowerCAmelCase_ : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''').input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3))
# Test batched
lowerCAmelCase_ : Optional[int] = feature_extractor(A_ , return_tensors='''np''').input_values
lowerCAmelCase_ : int = feature_extractor(A_ , return_tensors='''np''').input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3))
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Tuple = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase_ : Any = np.asarray(A_)
lowerCAmelCase_ : Any = feature_extractor(A_ , return_tensors='''np''').input_values
lowerCAmelCase_ : int = feature_extractor(A_ , return_tensors='''np''').input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3))
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
lowerCAmelCase_ : Any = feat_extract.model_input_names[0]
lowerCAmelCase_ : Dict = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(A_) == len(A_) for x, y in zip(A_ , processed_features[input_name])))
lowerCAmelCase_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A_)
lowerCAmelCase_ : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''')
lowerCAmelCase_ : Dict = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowerCAmelCase_ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=A_)
lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict)
lowerCAmelCase_ : Optional[int] = feat_extract.model_input_names[0]
lowerCAmelCase_ : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''')
lowerCAmelCase_ : Any = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowerCAmelCase_ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict)
lowerCAmelCase_ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase_ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCAmelCase_ : Union[str, Any] = BatchFeature({input_name: speech_inputs})
lowerCAmelCase_ : str = feat_extract.num_mel_bins # hack!
lowerCAmelCase_ : List[Any] = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''')[input_name]
lowerCAmelCase_ : Tuple = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''pt''')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : str = self.feat_extract_dict
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**A_)
lowerCAmelCase_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase_ : str = [len(A_) for x in speech_inputs]
lowerCAmelCase_ : List[str] = feat_extract.model_input_names[0]
lowerCAmelCase_ : Dict = BatchFeature({input_name: speech_inputs})
lowerCAmelCase_ : str = feat_extract.num_mel_bins # hack!
lowerCAmelCase_ : Tuple = feat_extract.pad(A_ , padding='''longest''' , return_tensors='''np''')
self.assertIn('''attention_mask''' , A_)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , A_)
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : Tuple = self.feat_extract_dict
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Any = self.feature_extraction_class(**A_)
lowerCAmelCase_ : Any = self.feat_extract_tester.prepare_inputs_for_target()
lowerCAmelCase_ : Tuple = [len(A_) for x in speech_inputs]
lowerCAmelCase_ : Tuple = feat_extract.model_input_names[0]
lowerCAmelCase_ : str = BatchFeature({input_name: speech_inputs})
lowerCAmelCase_ : int = min(A_)
lowerCAmelCase_ : Dict = feat_extract.num_mel_bins # hack!
lowerCAmelCase_ : str = feat_extract.pad(
A_ , padding='''max_length''' , max_length=A_ , truncation=A_ , return_tensors='''np''')
self.assertIn('''attention_mask''' , A_)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
def UpperCAmelCase__ ( self : int , A_ : Optional[int]):
from datasets import load_dataset
lowerCAmelCase_ : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
# automatic decoding with librispeech
lowerCAmelCase_ : Optional[Any] = ds.sort('''id''').select(range(A_))[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Any):
# fmt: off
lowerCAmelCase_ : Optional[int] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
# fmt: on
lowerCAmelCase_ : Tuple = self._load_datasamples(1)
lowerCAmelCase_ : List[str] = SpeechTaFeatureExtractor()
lowerCAmelCase_ : List[Any] = feature_extractor(A_ , return_tensors='''pt''').input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0))
self.assertTrue(torch.allclose(input_values[0, :3_0] , A_ , atol=1e-6))
def UpperCAmelCase__ ( self : Optional[int]):
# fmt: off
lowerCAmelCase_ : Tuple = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
# fmt: on
lowerCAmelCase_ : Union[str, Any] = self._load_datasamples(1)
lowerCAmelCase_ : Any = SpeechTaFeatureExtractor()
lowerCAmelCase_ : Dict = feature_extractor(audio_target=A_ , return_tensors='''pt''').input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , A_ , atol=1e-4))
| 171 | 0 |
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCAmelCase_ = 'facebook/wmt19-en-de'
lowerCAmelCase_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCAmelCase_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCAmelCase_ = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
lowerCAmelCase_ = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
lowerCAmelCase_ = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
lowerCAmelCase_ = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 720 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class UpperCAmelCase_ :
def __UpperCAmelCase ( self , _lowerCAmelCase ):
raise NotImplementedError()
def __UpperCAmelCase ( self ):
raise NotImplementedError()
class UpperCAmelCase_ ( __lowerCamelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = False , **_lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = tokenizer
UpperCAmelCase__ : Dict = skip_prompt
UpperCAmelCase__ : Union[str, Any] = decode_kwargs
# variables used in the streaming process
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Tuple = True
def __UpperCAmelCase ( self , _lowerCAmelCase ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
UpperCAmelCase__ : Optional[int] = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
UpperCAmelCase__ : int = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
UpperCAmelCase__ : int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
UpperCAmelCase__ : Optional[Any] = text[self.print_len :]
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Union[str, Any] = 0
# If the last token is a CJK character, we print the characters.
elif len(_lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
UpperCAmelCase__ : str = text[self.print_len :]
self.print_len += len(_lowerCAmelCase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
UpperCAmelCase__ : Dict = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(_lowerCAmelCase )
self.on_finalized_text(_lowerCAmelCase )
def __UpperCAmelCase ( self ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
UpperCAmelCase__ : str = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
UpperCAmelCase__ : Tuple = text[self.print_len :]
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Union[str, Any] = 0
else:
UpperCAmelCase__ : Dict = """"""
UpperCAmelCase__ : str = True
self.on_finalized_text(_lowerCAmelCase , stream_end=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = False ):
print(_lowerCAmelCase , flush=_lowerCAmelCase , end="""""" if not stream_end else None )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
class UpperCAmelCase_ ( __lowerCamelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None , **_lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Tuple = Queue()
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Tuple = timeout
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = False ):
self.text_queue.put(_lowerCAmelCase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 79 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 652 | 0 |
import math
def UpperCamelCase ( snake_case__):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase ( snake_case__ = 1_00_01):
try:
lowerCAmelCase_ : List[Any] = int(snake_case__)
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int.") from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one.")
lowerCAmelCase_ : list[int] = []
lowerCAmelCase_ : List[str] = 2
while len(snake_case__) < nth:
if is_prime(snake_case__):
primes.append(snake_case__)
num += 1
else:
num += 1
return primes[len(snake_case__) - 1]
if __name__ == "__main__":
print(f"{solution() = }")
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = HfArgumentParser(snake_case__)
lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=snake_case__)
try:
lowerCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase_ : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowerCAmelCase_ : Tuple = " ".join(str(snake_case__).split(" ")[:-1])
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : Optional[Any] = eval(str(snake_case__).split(" ")[-1])
lowerCAmelCase_ : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(snake_case__)
if len(snake_case__) > 0:
lowerCAmelCase_ : Optional[Any] = full_error_msg + begin_error_msg + str(snake_case__)
raise ValueError(snake_case__)
benchmark.run()
if __name__ == "__main__":
main()
| 683 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a__ : int ='''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
a__ : str ='''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
a__ : Any ='''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def _lowerCamelCase ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _lowerCamelCase ( self : List[Any] , __A : Union[str, Any] , __A : str , __A : List[str]=None , __A : List[str]=True , __A : Optional[Any]=False ):
if rouge_types is None:
__UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
__UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=__A , use_stemmer=__A )
if use_aggregator:
__UpperCamelCase = scoring.BootstrapAggregator()
else:
__UpperCamelCase = []
for ref, pred in zip(__A , __A ):
__UpperCamelCase = scorer.score(__A , __A )
if use_aggregator:
aggregator.add_scores(__A )
else:
scores.append(__A )
if use_aggregator:
__UpperCamelCase = aggregator.aggregate()
else:
__UpperCamelCase = {}
for key in scores[0]:
__UpperCamelCase = [score[key] for score in scores]
return result
| 399 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
a__ : str =argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
a__ : Any =parser.parse_args()
a__ : Union[str, Any] =download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 399 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 622 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class a__ ( a_ ):
__lowerCAmelCase = 42
__lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 361 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Optional[int] = logging.get_logger(__name__)
def __magic_name__ ( __snake_case : str ) -> YolosConfig:
lowercase : int = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase : Union[str, Any] = 192
lowercase : Tuple = 768
lowercase : Optional[Any] = 12
lowercase : List[str] = 3
lowercase : int = [800, 1333]
lowercase : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase : int = 330
lowercase : List[str] = 14
lowercase : Dict = 6
lowercase : Any = 1320
elif "yolos_s" in yolos_name:
lowercase : str = 384
lowercase : int = 1536
lowercase : Any = 12
lowercase : int = 6
elif "yolos_b" in yolos_name:
lowercase : str = [800, 1344]
lowercase : Tuple = 91
lowercase : Tuple = "huggingface/label-files"
lowercase : Any = "coco-detection-id2label.json"
lowercase : str = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
lowercase : List[str] = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase : Union[str, Any] = idalabel
lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def __magic_name__ ( __snake_case : dict , __snake_case : YolosConfig , __snake_case : bool = False ) -> int:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
lowercase : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase : int = in_proj_bias[: config.hidden_size]
lowercase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : Dict = in_proj_weight[-config.hidden_size :, :]
lowercase : List[Any] = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( __snake_case : str ) -> str:
if "backbone" in name:
lowercase : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase : Union[str, Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase : Dict = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase : Optional[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase : Any = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase : Optional[int] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase : List[str] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase : List[str] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase : List[Any] = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase : Optional[Any] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase : Optional[int] = name.replace("vit.norm" , "vit.layernorm" )
return name
def __magic_name__ ( __snake_case : dict , __snake_case : YolosForObjectDetection ) -> dict:
for key in orig_state_dict.copy().keys():
lowercase : List[Any] = orig_state_dict.pop(__snake_case )
if "qkv" in key:
lowercase : Union[str, Any] = key.split("." )
lowercase : List[Any] = int(key_split[2] )
lowercase : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase : int = val[:dim, :]
lowercase : List[Any] = val[
dim : dim * 2, :
]
lowercase : Any = val[-dim:, :]
else:
lowercase : Dict = val[:dim]
lowercase : Dict = val[dim : dim * 2]
lowercase : str = val[-dim:]
else:
lowercase : List[str] = val
return orig_state_dict
def __magic_name__ ( ) -> torch.Tensor:
lowercase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase : Tuple = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : bool = False ) -> Dict:
lowercase : Optional[int] = get_yolos_config(__snake_case )
# load original state_dict
lowercase : str = torch.load(__snake_case , map_location="cpu" )["model"]
# load 🤗 model
lowercase : str = YolosForObjectDetection(__snake_case )
model.eval()
lowercase : Dict = convert_state_dict(__snake_case , __snake_case )
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase : Any = 800 if yolos_name != "yolos_ti" else 512
lowercase : List[Any] = YolosImageProcessor(format="coco_detection" , size=__snake_case )
lowercase : Any = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase : Any = model(**__snake_case )
lowercase , lowercase : Tuple = outputs.logits, outputs.pred_boxes
lowercase , lowercase : int = None, None
if yolos_name == "yolos_ti":
lowercase : int = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowercase : List[Any] = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowercase : List[Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowercase : List[Any] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowercase : Optional[int] = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowercase : Tuple = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowercase : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowercase : List[str] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowercase : Optional[Any] = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowercase : List[str] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __snake_case , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __snake_case , atol=1E-4 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
lowercase : Optional[int] = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase : Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(__snake_case , organization="hustvl" )
model.push_to_hub(__snake_case , organization="hustvl" )
if __name__ == "__main__":
_A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_A : Any = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 361 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000000 ) -> int:
lowercase : Dict =set(range(3 , __magic_name__ , 2 ) )
primes.add(2 )
for p in range(3 , __magic_name__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __magic_name__ , __magic_name__ ) ) )
lowercase : List[Any] =[float(__magic_name__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(__magic_name__ , limit + 1 , __magic_name__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCamelCase_ = {
"""facebook/xglm-564M""": 2048,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase : int ={} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase : Optional[Any] =7
lowercase : Optional[int] =[F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase : List[Any] =kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowercase : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase__ ) )
lowercase : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Union[str, Any] =1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : List[str] ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : str =len(self.sp_model )
lowercase : List[Any] ={F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCAmelCase__ )
lowercase : int ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
'''simple docstring'''
lowercase : Optional[int] =self.__dict__.copy()
lowercase : List[Any] =None
lowercase : Tuple =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
lowercase : int =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Optional[int] ={}
lowercase : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase : List[Any] =[self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ ))
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ ))
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase : int =[self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int ={self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[str] =self.sp_model.PieceToId(UpperCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Any ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase : Dict =''''''.join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , ''' ''' ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
lowercase : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=[1, 1, 2] , _lowerCamelCase=1 , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=8 , _lowerCamelCase=37 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=512 , _lowerCamelCase=3 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=False , ):
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : Optional[int] = is_training
UpperCAmelCase__ : Optional[Any] = use_input_mask
UpperCAmelCase__ : Optional[int] = use_token_type_ids
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Dict = block_sizes
UpperCAmelCase__ : Tuple = num_decoder_layers
UpperCAmelCase__ : Optional[Any] = d_model
UpperCAmelCase__ : List[str] = n_head
UpperCAmelCase__ : Optional[Any] = d_head
UpperCAmelCase__ : Tuple = d_inner
UpperCAmelCase__ : List[str] = hidden_act
UpperCAmelCase__ : Tuple = hidden_dropout
UpperCAmelCase__ : str = attention_dropout
UpperCAmelCase__ : Tuple = activation_dropout
UpperCAmelCase__ : Dict = max_position_embeddings
UpperCAmelCase__ : Optional[Any] = type_vocab_size
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : str = num_choices
UpperCAmelCase__ : Tuple = scope
UpperCAmelCase__ : List[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCAmelCase__ : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
UpperCAmelCase__ : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCAmelCase__ : Union[str, Any] = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCAmelCase__ : Optional[int] = self.num_hidden_layers + 2
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : List[str] = None
if self.use_input_mask:
UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : str = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Optional[int] = TFFunnelModel(config=A_)
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : Any = model(A_)
UpperCAmelCase__ : Tuple = [input_ids, input_mask]
UpperCAmelCase__ : Dict = model(A_)
UpperCAmelCase__ : Union[str, Any] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Union[str, Any] = TFFunnelModel(config=A_)
UpperCAmelCase__ : Optional[int] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = TFFunnelModel(config=A_)
UpperCAmelCase__ : str = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = TFFunnelBaseModel(config=A_)
UpperCAmelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : Union[str, Any] = model(A_)
UpperCAmelCase__ : Dict = [input_ids, input_mask]
UpperCAmelCase__ : Any = model(A_)
UpperCAmelCase__ : Optional[int] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = TFFunnelBaseModel(config=A_)
UpperCAmelCase__ : Any = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = TFFunnelBaseModel(config=A_)
UpperCAmelCase__ : Optional[int] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Optional[int] = TFFunnelForPreTraining(config=A_)
UpperCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : List[Any] = model(A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Tuple = TFFunnelForMaskedLM(config=A_)
UpperCAmelCase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : str = model(A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Any = TFFunnelForSequenceClassification(config=A_)
UpperCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : Optional[int] = model(A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : int = self.num_choices
UpperCAmelCase__ : Any = TFFunnelForMultipleChoice(config=A_)
UpperCAmelCase__ : List[Any] = tf.tile(tf.expand_dims(A_ , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Union[str, Any] = tf.tile(tf.expand_dims(A_ , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Union[str, Any] = tf.tile(tf.expand_dims(A_ , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : int = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase__ : int = model(A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Dict = self.num_labels
UpperCAmelCase__ : Optional[Any] = TFFunnelForTokenClassification(config=A_)
UpperCAmelCase__ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : Union[str, Any] = model(A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Optional[Any] = TFFunnelForQuestionAnswering(config=A_)
UpperCAmelCase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase__ : List[Any] = model(A_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
(
UpperCAmelCase__
) : str = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowerCAmelCase :Any = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase :str = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase :Optional[int] = False
lowerCAmelCase :Tuple = False
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = TFFunnelModelTester(self)
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=A_)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_)
@require_tf
class _snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowerCAmelCase :Optional[Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase :Optional[Any] = False
lowerCAmelCase :Dict = False
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = TFFunnelModelTester(self , base=A_)
UpperCAmelCase__ : str = ConfigTester(self , config_class=A_)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_) | 407 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
stooge(_UpperCamelCase , 0 , len(_UpperCamelCase ) - 1 )
return arr
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_lowercase , _lowercase: Optional[Any] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_lowercase: Tuple = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_UpperCamelCase , _UpperCamelCase , (h - t) )
# Recursively sort last 2/3 elements
stooge(_UpperCamelCase , i + t , (_UpperCamelCase) )
# Recursively sort first 2/3 elements
stooge(_UpperCamelCase , _UpperCamelCase , (h - t) )
if __name__ == "__main__":
A__ : Dict = input('Enter numbers separated by a comma:\n').strip()
A__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 353 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A_ : Dict = None
A_ : Tuple = logging.get_logger(__name__)
A_ : Dict = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A_ : Optional[Any] = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
A_ : int = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
A_ : List[str] = """▁"""
class _a (UpperCamelCase_ ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__: Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: Any = BigBirdTokenizer
UpperCAmelCase__: str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__: List[str] = []
def __init__( self , A__=None , A__=None , A__="<unk>" , A__="<s>" , A__="</s>" , A__="<pad>" , A__="[SEP]" , A__="[MASK]" , A__="[CLS]" , **A__ , ):
A__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
A__ : Union[str, Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
A__ : Union[str, Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
A__ : Optional[int] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
A__ : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
A__ : str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A__ : Union[str, Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
A__ : List[Any] = vocab_file
A__ : Tuple = False if not self.vocab_file else True
def __A ( self , A__ , A__ = None ):
A__ : str = [self.sep_token_id]
A__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def __A ( self , A__ , A__ = None ):
A__ : Union[str, Any] = [self.sep_token_id]
A__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A__ , A__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ : Any = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
import requests
from bsa import BeautifulSoup
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: dict ) -> str:
_UpperCAmelCase : List[str] = BeautifulSoup(requests.get(__lowerCamelCase , params=__lowerCamelCase ).content , "html.parser" )
_UpperCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_UpperCAmelCase : List[Any] = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 300 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE : Dict = get_tests_dir("""fixtures""")
class UpperCamelCase__ ( unittest.TestCase ):
def __lowercase( self : Any ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
UpperCamelCase__ : Optional[int] = mock.Mock()
UpperCamelCase__ : Optional[Any] = 5_00
UpperCamelCase__ : List[Any] = {}
UpperCamelCase__ : Optional[int] = HTTPError
UpperCamelCase__ : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''', return_value=__lowerCamelCase ) as mock_head:
UpperCamelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
@classmethod
def __lowercase( cls : Tuple ) -> Dict:
UpperCamelCase__ : str = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def __lowercase( cls : List[str] ) -> Any:
try:
delete_repo(token=cls._token, repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowercase( self : List[str] ) -> List[str]:
UpperCamelCase__ : int = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''', use_auth_token=self._token )
UpperCamelCase__ : int = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase, getattr(__lowerCamelCase, __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase, repo_id='''test-feature-extractor''', push_to_hub=__lowerCamelCase, use_auth_token=self._token )
UpperCamelCase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase, getattr(__lowerCamelCase, __lowerCamelCase ) )
def __lowercase( self : List[Any] ) -> List[Any]:
UpperCamelCase__ : int = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''', use_auth_token=self._token )
UpperCamelCase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase, getattr(__lowerCamelCase, __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase, repo_id='''valid_org/test-feature-extractor-org''', push_to_hub=__lowerCamelCase, use_auth_token=self._token )
UpperCamelCase__ : List[str] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase, getattr(__lowerCamelCase, __lowerCamelCase ) )
def __lowercase( self : int ) -> Union[str, Any]:
CustomFeatureExtractor.register_for_auto_class()
UpperCamelCase__ : Tuple = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map, {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''}, )
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
f'{USER}/test-dynamic-feature-extractor', trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, '''CustomFeatureExtractor''' )
| 344 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A__ ( _snake_case ):
lowercase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = emb_layer_norm_before
A_ = token_dropout
A_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
A_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = EsmFoldConfig(**UpperCamelCase__ )
A_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
A_ = get_default_vocab_list()
else:
A_ = vocab_list
else:
A_ = None
A_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A_ = self.esmfold_config.to_dict()
return output
@dataclass
class A__ :
lowercase = None
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = 0
lowercase = True
lowercase = False
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
if self.trunk is None:
A_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A_ = TrunkConfig(**self.trunk )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = asdict(self )
A_ = self.trunk.to_dict()
return output
@dataclass
class A__ :
lowercase = 48
lowercase = 1_024
lowercase = 128
lowercase = 32
lowercase = 32
lowercase = 32
lowercase = 0
lowercase = 0
lowercase = False
lowercase = 4
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> Any:
'''simple docstring'''
if self.structure_module is None:
A_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
A_ = self.sequence_state_dim // self.sequence_head_width
A_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = asdict(self )
A_ = self.structure_module.to_dict()
return output
@dataclass
class A__ :
lowercase = 384
lowercase = 128
lowercase = 16
lowercase = 128
lowercase = 12
lowercase = 4
lowercase = 8
lowercase = 0.1
lowercase = 8
lowercase = 1
lowercase = 2
lowercase = 7
lowercase = 10
lowercase = 1e-8
lowercase = 1e5
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return asdict(self )
def UpperCAmelCase__ ( ) -> List[str]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Dict , _a : int , _a : int , _a : float , **_a : Optional[int] ) -> List[Any]:
__lowerCamelCase : List[Any] = feature_size
__lowerCamelCase : Union[str, Any] = sampling_rate
__lowerCamelCase : Dict = padding_value
__lowerCamelCase : Optional[int] = kwargs.pop('padding_side' , 'right' )
__lowerCamelCase : Dict = kwargs.pop('return_attention_mask' , _a )
super().__init__(**_a )
def _lowercase ( self : Union[str, Any] , _a : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , _a : Union[bool, str, PaddingStrategy] = True , _a : Optional[int] = None , _a : bool = False , _a : Optional[int] = None , _a : Optional[bool] = None , _a : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(_a , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__lowerCamelCase : Union[str, Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
__lowerCamelCase : int = processed_features[self.model_input_names[0]]
__lowerCamelCase : Tuple = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_a ) == 0:
if return_attention_mask:
__lowerCamelCase : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__lowerCamelCase : List[str] = required_input[0]
if isinstance(_a , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__lowerCamelCase : int = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_a ):
__lowerCamelCase : int = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_a ):
__lowerCamelCase : Any = 'tf'
elif is_torch_tensor(_a ):
__lowerCamelCase : int = 'pt'
elif isinstance(_a , (int, float, list, tuple, np.ndarray) ):
__lowerCamelCase : Optional[Any] = 'np'
else:
raise ValueError(
f'type of {first_element} unknown: {type(_a )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__lowerCamelCase : Tuple = to_numpy(_a )
else:
__lowerCamelCase : List[Any] = [to_numpy(_a ) for v in value]
# Convert padding_strategy in PaddingStrategy
__lowerCamelCase : List[Any] = self._get_padding_strategies(padding=_a , max_length=_a )
__lowerCamelCase : Any = processed_features[self.model_input_names[0]]
__lowerCamelCase : Any = len(_a )
if not all(len(_a ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
__lowerCamelCase : List[str] = []
for i in range(_a ):
__lowerCamelCase : Optional[int] = {k: v[i] for k, v in processed_features.items()}
# truncation
__lowerCamelCase : List[str] = self._truncate(
_a , max_length=_a , pad_to_multiple_of=_a , truncation=_a , )
truncated_inputs.append(_a )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__lowerCamelCase : Any = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__lowerCamelCase : Dict = PaddingStrategy.MAX_LENGTH
__lowerCamelCase : Any = {}
for i in range(_a ):
# padding
__lowerCamelCase : Any = self._pad(
truncated_inputs[i] , max_length=_a , padding_strategy=_a , pad_to_multiple_of=_a , return_attention_mask=_a , )
for key, value in outputs.items():
if key not in batch_outputs:
__lowerCamelCase : Any = []
if value.dtype is np.dtype(np.floataa ):
__lowerCamelCase : Dict = value.astype(np.floataa )
batch_outputs[key].append(_a )
return BatchFeature(_a , tensor_type=_a )
def _lowercase ( self : Dict , _a : Union[Dict[str, np.ndarray], BatchFeature] , _a : Optional[int] = None , _a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _a : Optional[int] = None , _a : Optional[bool] = None , ) -> dict:
__lowerCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__lowerCamelCase : Any = len(_a )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase : Dict = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_a ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__lowerCamelCase : Optional[Any] = np.ones(len(_a ) , dtype=np.intaa )
if needs_to_be_padded:
__lowerCamelCase : List[Any] = max_length - len(_a )
if self.padding_side == "right":
if return_attention_mask:
__lowerCamelCase : List[str] = np.pad(
processed_features['attention_mask'] , (0, difference) )
__lowerCamelCase : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__lowerCamelCase : str = np.pad(
_a , _a , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__lowerCamelCase : str = np.pad(
processed_features['attention_mask'] , (difference, 0) )
__lowerCamelCase : Tuple = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__lowerCamelCase : Optional[int] = np.pad(
_a , _a , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def _lowercase ( self : Tuple , _a : Union[Dict[str, np.ndarray], BatchFeature] , _a : Optional[int] = None , _a : Optional[int] = None , _a : Optional[bool] = None , ) -> List[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
__lowerCamelCase : List[Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__lowerCamelCase : Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__lowerCamelCase : Optional[Any] = len(_a ) > max_length
if needs_to_be_truncated:
__lowerCamelCase : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__lowerCamelCase : Tuple = processed_features['attention_mask'][:max_length]
return processed_features
def _lowercase ( self : str , _a : Union[str, Any]=False , _a : Optional[int]=None ) -> List[str]:
# Get padding strategy
if padding is not False:
if padding is True:
__lowerCamelCase : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_a , _a ):
__lowerCamelCase : Union[str, Any] = PaddingStrategy(_a )
elif isinstance(_a , _a ):
__lowerCamelCase : Dict = padding
else:
__lowerCamelCase : Optional[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
| 459 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_UpperCamelCase = True
except ImportError:
_UpperCamelCase = False
try:
from torch.hub import _get_torch_home
_UpperCamelCase = _get_torch_home()
except ImportError:
_UpperCamelCase = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
_UpperCamelCase = os.path.join(torch_cache_home, 'transformers')
_UpperCamelCase = 'https://cdn.huggingface.co'
_UpperCamelCase = 'https://s3.amazonaws.com/models.huggingface.co/bert'
_UpperCamelCase = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
_UpperCamelCase = os.path.join(PATH, 'config.yaml')
_UpperCamelCase = os.path.join(PATH, 'attributes.txt')
_UpperCamelCase = os.path.join(PATH, 'objects.txt')
_UpperCamelCase = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
_UpperCamelCase = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
_UpperCamelCase = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
_UpperCamelCase = 'pytorch_model.bin'
_UpperCamelCase = 'config.yaml'
def a_ ( _lowerCAmelCase=OBJECTS ,_lowerCAmelCase=ATTRIBUTES ) -> Union[str, Any]:
__lowerCamelCase : Dict = []
with open(_lowerCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
__lowerCamelCase : List[Any] = []
with open(_lowerCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def a_ ( _lowerCAmelCase ) -> Any:
__lowerCamelCase : Any = OrderedDict()
with open(_lowerCAmelCase ,'rb' ) as f:
__lowerCamelCase : str = pkl.load(_lowerCAmelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
__lowerCamelCase : Any = ckp.pop(_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,np.ndarray ):
__lowerCamelCase : Dict = torch.tensor(_lowerCAmelCase )
else:
assert isinstance(_lowerCAmelCase ,torch.tensor ), type(_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = v
return r
class lowerCamelCase_ :
"""simple docstring"""
a_ ={}
def __init__( self : int , _a : dict , _a : str = "root" , _a : int=0 ) -> Union[str, Any]:
__lowerCamelCase : Dict = name
__lowerCamelCase : Union[str, Any] = level
__lowerCamelCase : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__lowerCamelCase : int = copy.deepcopy(_a )
__lowerCamelCase : List[str] = copy.deepcopy(_a )
if isinstance(_a , _a ):
__lowerCamelCase : str = Config(_a , name=_a , level=level + 1 )
__lowerCamelCase : Union[str, Any] = v
setattr(self , _a , _a )
__lowerCamelCase : Optional[int] = d
def __repr__( self : int ) -> Any:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : int , _a : Tuple , _a : Optional[int] ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = val
__lowerCamelCase : Optional[Any] = val
__lowerCamelCase : List[str] = key.split('.' )
__lowerCamelCase : str = len(_a ) - 1
__lowerCamelCase : str = self._pointer
if len(_a ) > 1:
for i, l in enumerate(_a ):
if hasattr(self , _a ) and isinstance(getattr(self , _a ) , _a ):
setattr(getattr(self , _a ) , '.'.join(levels[i:] ) , _a )
if l == last_level:
__lowerCamelCase : Union[str, Any] = val
else:
__lowerCamelCase : List[Any] = pointer[l]
def _lowercase ( self : Dict ) -> Optional[Any]:
return self._pointer
def _lowercase ( self : Any , _a : List[str] , _a : List[str] ) -> Dict:
with open(f'{file_name}' , 'w' ) as stream:
dump(_a , _a )
def _lowercase ( self : Any , _a : Union[str, Any] , _a : List[Any] ) -> List[Any]:
with open(f'{file_name}' , 'w' ) as stream:
json.dump(_a , _a )
@staticmethod
def _lowercase ( _a : Any ) -> List[str]:
with open(_a ) as stream:
__lowerCamelCase : Dict = load(_a , Loader=_a )
return data
def __str__( self : Any ) -> List[str]:
__lowerCamelCase : Dict = ' '
if self._name != "root":
__lowerCamelCase : Dict = f'{t * (self._level-1)}{self._name}:\n'
else:
__lowerCamelCase : str = ''
__lowerCamelCase : List[str] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_a , _a ):
r += f'{t * (self._level)}{v}\n'
self._level += 1
else:
r += f'{t * (self._level)}{k}: {v} ({type(_a ).__name__})\n'
__lowerCamelCase : Any = level
return r[:-1]
@classmethod
def _lowercase ( cls : int , _a : str , **_a : List[str] ) -> Union[str, Any]:
__lowerCamelCase ,__lowerCamelCase : str = cls.get_config_dict(_a , **_a )
return cls(_a )
@classmethod
def _lowercase ( cls : Dict , _a : str , **_a : List[str] ) -> List[Any]:
__lowerCamelCase : str = kwargs.pop('cache_dir' , _a )
__lowerCamelCase : List[Any] = kwargs.pop('force_download' , _a )
__lowerCamelCase : List[Any] = kwargs.pop('resume_download' , _a )
__lowerCamelCase : List[Any] = kwargs.pop('proxies' , _a )
__lowerCamelCase : Optional[Any] = kwargs.pop('local_files_only' , _a )
if os.path.isdir(_a ):
__lowerCamelCase : Tuple = os.path.join(_a , _a )
elif os.path.isfile(_a ) or is_remote_url(_a ):
__lowerCamelCase : List[str] = pretrained_model_name_or_path
else:
__lowerCamelCase : List[Any] = hf_bucket_url(_a , filename=_a , use_cdn=_a )
try:
# Load from URL or cache if already cached
__lowerCamelCase : str = cached_path(
_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__lowerCamelCase : Optional[Any] = Config.load_yaml(_a )
except EnvironmentError:
__lowerCamelCase : Optional[int] = 'Can\'t load config for'
raise EnvironmentError(_a )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_a ), kwargs
def a_ ( _lowerCAmelCase ) -> Dict:
__lowerCamelCase : Dict = torch.load('dump.pt' ,map_location=in_tensor.device )
__lowerCamelCase : Optional[int] = in_tensor.numpy()
__lowerCamelCase : int = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,rtol=0.01 ,atol=0.1 ), (
F'{sum([1 for x in np.isclose(_lowerCAmelCase ,_lowerCAmelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def a_ ( _lowerCAmelCase ) -> int:
__lowerCamelCase : List[str] = urlparse(_lowerCAmelCase )
return parsed.scheme in ("http", "https")
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=True ) -> str:
__lowerCamelCase : Any = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__lowerCamelCase : List[str] = '/' not in model_id
if legacy_format:
return F'{endpoint}/{model_id}-{filename}'
else:
return F'{endpoint}/{model_id}/{filename}'
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=0 ,_lowerCAmelCase=None ,) -> Any:
__lowerCamelCase : Tuple = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(_lowerCAmelCase ,_lowerCAmelCase ) for k, v in user_agent.items() )
elif isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
ua += "; " + user_agent
__lowerCamelCase : List[Any] = {'user-agent': ua}
if resume_size > 0:
__lowerCamelCase : List[str] = 'bytes=%d-' % (resume_size,)
__lowerCamelCase : Optional[Any] = requests.get(_lowerCAmelCase ,stream=_lowerCAmelCase ,proxies=_lowerCAmelCase ,headers=_lowerCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
__lowerCamelCase : List[Any] = response.headers.get('Content-Length' )
__lowerCamelCase : Tuple = resume_size + int(_lowerCAmelCase ) if content_length is not None else None
__lowerCamelCase : int = tqdm(
unit='B' ,unit_scale=_lowerCAmelCase ,total=_lowerCAmelCase ,initial=_lowerCAmelCase ,desc='Downloading' ,)
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_lowerCAmelCase ) )
temp_file.write(_lowerCAmelCase )
progress.close()
def a_ ( _lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=10 ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,) -> List[str]:
if cache_dir is None:
__lowerCamelCase : Any = TRANSFORMERS_CACHE
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__lowerCamelCase : Tuple = str(_lowerCAmelCase )
os.makedirs(_lowerCAmelCase ,exist_ok=_lowerCAmelCase )
__lowerCamelCase : int = None
if not local_files_only:
try:
__lowerCamelCase : Optional[int] = requests.head(_lowerCAmelCase ,allow_redirects=_lowerCAmelCase ,proxies=_lowerCAmelCase ,timeout=_lowerCAmelCase )
if response.status_code == 200:
__lowerCamelCase : List[Any] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__lowerCamelCase : Tuple = url_to_filename(_lowerCAmelCase ,_lowerCAmelCase )
# get cache path to put the file
__lowerCamelCase : Tuple = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_lowerCAmelCase ):
return cache_path
else:
__lowerCamelCase : int = [
file
for file in fnmatch.filter(os.listdir(_lowerCAmelCase ) ,filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_lowerCAmelCase ) > 0:
return os.path.join(_lowerCAmelCase ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__lowerCamelCase : Union[str, Any] = cache_path + '.lock'
with FileLock(_lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__lowerCamelCase : Optional[int] = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_lowerCAmelCase ,'a+b' ) as f:
yield f
__lowerCamelCase : Optional[int] = _resumable_file_manager
if os.path.exists(_lowerCAmelCase ):
__lowerCamelCase : int = os.stat(_lowerCAmelCase ).st_size
else:
__lowerCamelCase : Dict = 0
else:
__lowerCamelCase : str = partial(tempfile.NamedTemporaryFile ,dir=_lowerCAmelCase ,delete=_lowerCAmelCase )
__lowerCamelCase : List[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' ,_lowerCAmelCase ,temp_file.name ,)
http_get(
_lowerCAmelCase ,_lowerCAmelCase ,proxies=_lowerCAmelCase ,resume_size=_lowerCAmelCase ,user_agent=_lowerCAmelCase ,)
os.replace(temp_file.name ,_lowerCAmelCase )
__lowerCamelCase : List[Any] = {'url': url, 'etag': etag}
__lowerCamelCase : str = cache_path + '.json'
with open(_lowerCAmelCase ,'w' ) as meta_file:
json.dump(_lowerCAmelCase ,_lowerCAmelCase )
return cache_path
def a_ ( _lowerCAmelCase ,_lowerCAmelCase=None ) -> Dict:
__lowerCamelCase : Any = url.encode('utf-8' )
__lowerCamelCase : Tuple = shaaaa(_lowerCAmelCase )
__lowerCamelCase : Dict = url_hash.hexdigest()
if etag:
__lowerCamelCase : List[str] = etag.encode('utf-8' )
__lowerCamelCase : Any = shaaaa(_lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def a_ ( _lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,) -> str:
if cache_dir is None:
__lowerCamelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__lowerCamelCase : List[str] = str(_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
__lowerCamelCase : List[str] = str(_lowerCAmelCase )
if is_remote_url(_lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
__lowerCamelCase : Optional[Any] = get_from_cache(
_lowerCAmelCase ,cache_dir=_lowerCAmelCase ,force_download=_lowerCAmelCase ,proxies=_lowerCAmelCase ,resume_download=_lowerCAmelCase ,user_agent=_lowerCAmelCase ,local_files_only=_lowerCAmelCase ,)
elif os.path.exists(_lowerCAmelCase ):
# File, and it exists.
__lowerCamelCase : List[Any] = url_or_filename
elif urlparse(_lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_lowerCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(_lowerCAmelCase ) and not tarfile.is_tarfile(_lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__lowerCamelCase ,__lowerCamelCase : Tuple = os.path.split(_lowerCAmelCase )
__lowerCamelCase : str = output_file.replace('.' ,'-' ) + '-extracted'
__lowerCamelCase : Optional[int] = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ) and os.listdir(_lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__lowerCamelCase : List[Any] = output_path + '.lock'
with FileLock(_lowerCAmelCase ):
shutil.rmtree(_lowerCAmelCase ,ignore_errors=_lowerCAmelCase )
os.makedirs(_lowerCAmelCase )
if is_zipfile(_lowerCAmelCase ):
with ZipFile(_lowerCAmelCase ,'r' ) as zip_file:
zip_file.extractall(_lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(_lowerCAmelCase ):
__lowerCamelCase : Tuple = tarfile.open(_lowerCAmelCase )
tar_file.extractall(_lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_lowerCAmelCase ) )
return output_path_extracted
return output_path
def a_ ( _lowerCAmelCase ,_lowerCAmelCase="," ) -> List[str]:
assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ):
with open(_lowerCAmelCase ) as f:
__lowerCamelCase : Optional[Any] = eval(f.read() )
else:
__lowerCamelCase : Union[str, Any] = requests.get(_lowerCAmelCase )
try:
__lowerCamelCase : Optional[Any] = requests.json()
except Exception:
__lowerCamelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__lowerCamelCase : Dict = eval(_lowerCAmelCase )
except Exception:
__lowerCamelCase : Dict = data.split('\n' )
req.close()
return data
def a_ ( _lowerCAmelCase ) -> List[Any]:
__lowerCamelCase : str = requests.get(_lowerCAmelCase )
__lowerCamelCase : Dict = np.array(Image.open(BytesIO(response.content ) ) )
return img
def a_ ( _lowerCAmelCase ) -> Optional[int]:
__lowerCamelCase : str = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowerCAmelCase )
with open(_lowerCAmelCase ,'rb' ) as stream:
__lowerCamelCase : str = pkl.load(_lowerCAmelCase )
__lowerCamelCase : Optional[int] = weights.pop('model' )
__lowerCamelCase : Dict = {}
for k, v in model.items():
__lowerCamelCase : str = torch.from_numpy(_lowerCAmelCase )
if "running_var" in k:
__lowerCamelCase : Tuple = torch.tensor([0] )
__lowerCamelCase : Any = k.replace('running_var' ,'num_batches_tracked' )
__lowerCamelCase : List[Any] = zero
return new
def a_ ( ) -> Dict:
print(F'{os.path.abspath(os.path.join(_lowerCAmelCase ,os.pardir ) )}/demo.ipynb' )
def a_ ( _lowerCAmelCase ,_lowerCAmelCase="RGB" ) -> List[Any]:
assert isinstance(_lowerCAmelCase ,_lowerCAmelCase )
if os.path.isfile(_lowerCAmelCase ):
__lowerCamelCase : Any = cva.imread(_lowerCAmelCase )
else:
__lowerCamelCase : int = get_image_from_url(_lowerCAmelCase )
assert img is not None, F'could not connect to: {im}'
__lowerCamelCase : Dict = cva.cvtColor(_lowerCAmelCase ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
__lowerCamelCase : Any = img[:, :, ::-1]
return img
def a_ ( _lowerCAmelCase ,_lowerCAmelCase=1 ) -> Union[str, Any]:
return (images[i : i + batch] for i in range(0 ,len(_lowerCAmelCase ) ,_lowerCAmelCase ))
| 459 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=UpperCAmelCase_ , )
assert hasattr(self , "env" )
def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : Dict=1 ):
"""simple docstring"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
TrainingJobAnalytics(UpperCAmelCase_ ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
# create estimator
__UpperCAmelCase : Dict = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__UpperCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__UpperCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__UpperCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__UpperCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCAmelCase_ )
| 329 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ : List[str] = TypeVar("KEY")
lowerCAmelCase__ : str = TypeVar("VAL")
@dataclass(frozen=snake_case__ ,slots=snake_case__ )
class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
class SCREAMING_SNAKE_CASE__ ( _Item ):
"""simple docstring"""
def __init__( self : Tuple ):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __bool__( self : Dict ):
"""simple docstring"""
return False
lowerCAmelCase__ : str = _DeletedItem()
class SCREAMING_SNAKE_CASE__ ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.75 ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = initial_block_size
__UpperCAmelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCAmelCase : int = capacity_factor
__UpperCAmelCase : int = 0
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : KEY ):
"""simple docstring"""
return hash(UpperCAmelCase_ ) % len(self._buckets )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : int ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
"""simple docstring"""
__UpperCAmelCase : Tuple = self._buckets[ind]
if not stored:
__UpperCAmelCase : str = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
self._len += 1
return True
elif stored.key == key:
__UpperCAmelCase : List[Any] = _Item(UpperCAmelCase_ , UpperCAmelCase_ )
return True
else:
return False
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCAmelCase : Dict = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCamelCase_ ( self : int , UpperCAmelCase_ : int ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self._buckets
__UpperCAmelCase : str = [None] * new_size
__UpperCAmelCase : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : KEY ):
"""simple docstring"""
__UpperCAmelCase : str = self._get_bucket_index(UpperCAmelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCAmelCase : Optional[Any] = self._get_next_ind(UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCAmelCase_ ):
if self._try_set(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
break
def __setitem__( self : Tuple , UpperCAmelCase_ : KEY , UpperCAmelCase_ : VAL ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(UpperCAmelCase_ , UpperCAmelCase_ )
def __delitem__( self : Optional[int] , UpperCAmelCase_ : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCAmelCase_ ):
__UpperCAmelCase : Optional[int] = self._buckets[ind]
if item is None:
raise KeyError(UpperCAmelCase_ )
if item is _deleted:
continue
if item.key == key:
__UpperCAmelCase : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Any , UpperCAmelCase_ : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(UpperCAmelCase_ ):
__UpperCAmelCase : str = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(UpperCAmelCase_ )
def __len__( self : List[Any] ):
"""simple docstring"""
return self._len
def __iter__( self : Optional[int] ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Any = " ,".join(
f"{item.key}: {item.val}" for item in self._buckets if item )
return f"HashMap({val_string})"
| 329 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : Optional[Any] = '''facebook/bart-large-mnli'''
A__ : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
A__ : List[Any] = '''text_classifier'''
A__ : Any = AutoTokenizer
A__ : Union[str, Any] = AutoModelForSequenceClassification
A__ : str = ['''text''', ['''text''']]
A__ : Tuple = ['''text''']
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
super().setup()
_snake_case = self.model.config
_snake_case = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_snake_case = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = outputs.logits
_snake_case = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 103 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {'''vocab_file''': '''vocab.txt'''}
snake_case = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
snake_case = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def snake_case ( lowerCAmelCase_ ) -> int:
_snake_case = collections.OrderedDict()
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as reader:
_snake_case = reader.readlines()
for index, token in enumerate(lowerCAmelCase_ ):
_snake_case = token.rstrip('''\n''' )
_snake_case = index
return vocab
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Tuple=2_0_0 ):
"""simple docstring"""
_snake_case = vocab
_snake_case = unk_token
_snake_case = max_input_chars_per_word
def __UpperCAmelCase ( self : Any , __lowerCamelCase : str ):
"""simple docstring"""
_snake_case = list(__lowerCamelCase )
if len(__lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_snake_case = 0
_snake_case = []
while start < len(__lowerCamelCase ):
_snake_case = len(__lowerCamelCase )
_snake_case = None
while start < end:
_snake_case = ''''''.join(chars[start:end] )
if substr in self.vocab:
_snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__lowerCamelCase )
_snake_case = end
return sub_tokens
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : List[str] = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
A__ : Optional[int] = False
def __init__( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str="<d>" , __lowerCamelCase : Tuple="</d>" , __lowerCamelCase : Tuple="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : int="</n>" , __lowerCamelCase : Tuple="</_>" , __lowerCamelCase : Optional[Any]="left" , **__lowerCamelCase : str , ):
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=__lowerCamelCase , eod_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , unk_token=__lowerCamelCase , line_token=__lowerCamelCase , space_token=__lowerCamelCase , padding_side=__lowerCamelCase , **__lowerCamelCase , )
_snake_case = bod_token
_snake_case = eod_token
_snake_case = load_vocab(__lowerCamelCase )
_snake_case = self.encoder[space_token]
_snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCamelCase : x[1] ) )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_snake_case = []
for x in jieba.cut(__lowerCamelCase , cut_all=__lowerCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowerCamelCase ) )
return output_tokens
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_snake_case = [i for i in token_ids if i >= 0]
_snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return token in self.encoder
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ):
"""simple docstring"""
return "".join(__lowerCamelCase )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int ):
"""simple docstring"""
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
return self.decoder.get(__lowerCamelCase , self.unk_token )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(__lowerCamelCase ):
_snake_case = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_snake_case = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
_snake_case = 0
if " " in self.encoder:
_snake_case = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
_snake_case = self.encoder['''\n''']
del self.encoder["\n"]
_snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCamelCase : x[1] ) )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
_snake_case = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase ))
return [1] + ([0] * len(__lowerCamelCase ))
| 103 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowercase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple=13 , snake_case : Tuple=2 , snake_case : str=24 , snake_case : int=16 , snake_case : List[Any]=True , snake_case : Tuple=True , snake_case : str=32 , snake_case : List[Any]=5 , snake_case : Optional[int]=4 , snake_case : Optional[Any]=37 , snake_case : Any="gelu" , snake_case : Any=0.1 , snake_case : Optional[Any]=0.1 , snake_case : List[Any]=10 , snake_case : Optional[int]=0.02 , snake_case : List[str]=None , snake_case : Union[str, Any]=2 , snake_case : Any=2 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = max_length
SCREAMING_SNAKE_CASE : List[Any] = num_mel_bins
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : int = frequency_stride
SCREAMING_SNAKE_CASE : List[str] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE : Dict = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
SCREAMING_SNAKE_CASE : Tuple = (self.max_length - self.patch_size) // self.time_stride + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = frequency_out_dimension * time_out_dimension
SCREAMING_SNAKE_CASE : List[str] = num_patches + 2
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, input_values, labels
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase_ ( self : List[str] , snake_case : List[str] , snake_case : Optional[int] , snake_case : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ASTModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {'input_values': input_values}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[int] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : str = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : Any = False
def lowerCamelCase_ ( self : Tuple , snake_case : Tuple , snake_case : int , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : List[str] ):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ASTModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(snake_case )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['input_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = ASTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __a ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE : Any = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = torchaudio.load(__lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowercase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_feature_extractor
SCREAMING_SNAKE_CASE : Dict = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(snake_case )
SCREAMING_SNAKE_CASE : List[Any] = self.default_feature_extractor
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prepare_audio()
SCREAMING_SNAKE_CASE : Optional[Any] = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(snake_case , sampling_rate=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**snake_case )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , snake_case )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) ) | 308 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = 'transfo-xl'
UpperCAmelCase : List[str] = ['mems']
UpperCAmelCase : Optional[Any] = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[int] , snake_case : Tuple=267735 , snake_case : Optional[Any]=[20000, 40000, 200000] , snake_case : List[Any]=1024 , snake_case : List[Any]=1024 , snake_case : List[Any]=16 , snake_case : int=64 , snake_case : Optional[int]=4096 , snake_case : Union[str, Any]=4 , snake_case : List[str]=False , snake_case : int=18 , snake_case : List[Any]=1600 , snake_case : Union[str, Any]=1000 , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Optional[Any]=0 , snake_case : Dict=-1 , snake_case : List[Any]=True , snake_case : Any=0.1 , snake_case : List[Any]=0.0 , snake_case : List[str]=True , snake_case : Optional[Any]="normal" , snake_case : Optional[Any]=0.01 , snake_case : Union[str, Any]=0.01 , snake_case : List[str]=0.02 , snake_case : List[str]=1E-5 , snake_case : Optional[int]=0 , **snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Any = []
self.cutoffs.extend(snake_case )
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE : Tuple = [False] + [True] * len(self.cutoffs )
else:
SCREAMING_SNAKE_CASE : List[Any] = [False] + [False] * len(self.cutoffs )
SCREAMING_SNAKE_CASE : Tuple = d_model
SCREAMING_SNAKE_CASE : Any = d_embed
SCREAMING_SNAKE_CASE : Tuple = d_head
SCREAMING_SNAKE_CASE : Union[str, Any] = d_inner
SCREAMING_SNAKE_CASE : Tuple = div_val
SCREAMING_SNAKE_CASE : int = pre_lnorm
SCREAMING_SNAKE_CASE : Tuple = n_layer
SCREAMING_SNAKE_CASE : List[str] = n_head
SCREAMING_SNAKE_CASE : Dict = mem_len
SCREAMING_SNAKE_CASE : Dict = same_length
SCREAMING_SNAKE_CASE : Union[str, Any] = attn_type
SCREAMING_SNAKE_CASE : str = clamp_len
SCREAMING_SNAKE_CASE : Any = sample_softmax
SCREAMING_SNAKE_CASE : Optional[int] = adaptive
SCREAMING_SNAKE_CASE : Optional[int] = dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = dropatt
SCREAMING_SNAKE_CASE : List[str] = untie_r
SCREAMING_SNAKE_CASE : Union[str, Any] = init
SCREAMING_SNAKE_CASE : Optional[int] = init_range
SCREAMING_SNAKE_CASE : Tuple = proj_init_std
SCREAMING_SNAKE_CASE : str = init_std
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=snake_case , **snake_case )
@property
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCamelCase_ ( self : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) | 308 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : "DiagonalGaussianDistribution"
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : List[Any] = True
@register_to_config
def __init__( self , _A = 3 , _A = 3 , _A = ("DownEncoderBlock2D",) , _A = ("UpDecoderBlock2D",) , _A = (6_4,) , _A = 1 , _A = "silu" , _A = 4 , _A = 3_2 , _A = 3_2 , _A = 0.1_82_15 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCamelCase : str = Encoder(
in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
# pass init params to Decoder
UpperCamelCase : Any = Decoder(
in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , norm_num_groups=_A , act_fn=_A , )
UpperCamelCase : Union[str, Any] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCamelCase : Any = nn.Convad(_A , _A , 1 )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Optional[int] = False
# only relevant if vae tiling is enabled
UpperCamelCase : Union[str, Any] = self.config.sample_size
UpperCamelCase : Dict = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCamelCase : Optional[Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCamelCase : Optional[int] = 0.25
def _a ( self , _A , _A=False ):
'''simple docstring'''
if isinstance(_A , (Encoder, Decoder) ):
UpperCamelCase : List[Any] = value
def _a ( self , _A = True ):
'''simple docstring'''
UpperCamelCase : Tuple = use_tiling
def _a ( self ):
'''simple docstring'''
self.enable_tiling(_A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = True
def _a ( self ):
'''simple docstring'''
UpperCamelCase : int = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = {}
def fn_recursive_add_processors(_A , _A , _A ):
if hasattr(_A , """set_processor""" ):
UpperCamelCase : List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A , _A , _A ):
if hasattr(_A , """set_processor""" ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def _a ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def _a ( self , _A , _A = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_A , return_dict=_A )
if self.use_slicing and x.shape[0] > 1:
UpperCamelCase : str = [self.encoder(_A ) for x_slice in x.split(1 )]
UpperCamelCase : List[str] = torch.cat(_A )
else:
UpperCamelCase : str = self.encoder(_A )
UpperCamelCase : str = self.quant_conv(_A )
UpperCamelCase : Optional[int] = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def _a ( self , _A , _A = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_A , return_dict=_A )
UpperCamelCase : Dict = self.post_quant_conv(_A )
UpperCamelCase : List[Any] = self.decoder(_A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
@apply_forward_hook
def _a ( self , _A , _A = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCamelCase : Optional[int] = [self._decode(_A ).sample for z_slice in z.split(1 )]
UpperCamelCase : Union[str, Any] = torch.cat(_A )
else:
UpperCamelCase : str = self._decode(_A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_A )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Tuple = min(a.shape[2] , b.shape[2] , _A )
for y in range(_A ):
UpperCamelCase : Optional[int] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : List[str] = min(a.shape[3] , b.shape[3] , _A )
for x in range(_A ):
UpperCamelCase : Optional[int] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def _a ( self , _A , _A = True ):
'''simple docstring'''
UpperCamelCase : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase : Optional[Any] = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCamelCase : int = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCamelCase : Any = []
for i in range(0 , x.shape[2] , _A ):
UpperCamelCase : Optional[Any] = []
for j in range(0 , x.shape[3] , _A ):
UpperCamelCase : Optional[Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCamelCase : Optional[int] = self.encoder(_A )
UpperCamelCase : List[Any] = self.quant_conv(_A )
row.append(_A )
rows.append(_A )
UpperCamelCase : Union[str, Any] = []
for i, row in enumerate(_A ):
UpperCamelCase : Any = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase : Optional[int] = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
UpperCamelCase : Any = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
UpperCamelCase : Union[str, Any] = torch.cat(_A , dim=2 )
UpperCamelCase : str = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def _a ( self , _A , _A = True ):
'''simple docstring'''
UpperCamelCase : int = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase : Optional[Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCamelCase : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCamelCase : Tuple = []
for i in range(0 , z.shape[2] , _A ):
UpperCamelCase : Optional[int] = []
for j in range(0 , z.shape[3] , _A ):
UpperCamelCase : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCamelCase : Optional[Any] = self.post_quant_conv(_A )
UpperCamelCase : List[str] = self.decoder(_A )
row.append(_A )
rows.append(_A )
UpperCamelCase : List[str] = []
for i, row in enumerate(_A ):
UpperCamelCase : Tuple = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase : List[Any] = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
UpperCamelCase : Any = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
UpperCamelCase : Tuple = torch.cat(_A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
def _a ( self , _A , _A = False , _A = True , _A = None , ):
'''simple docstring'''
UpperCamelCase : Tuple = sample
UpperCamelCase : Union[str, Any] = self.encode(_A ).latent_dist
if sample_posterior:
UpperCamelCase : Optional[Any] = posterior.sample(generator=_A )
else:
UpperCamelCase : Dict = posterior.mode()
UpperCamelCase : Any = self.decode(_A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
| 102 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
__UpperCAmelCase =multiprocessing.Manager()
__UpperCAmelCase =manager.list()
__UpperCAmelCase =multiprocessing.Process(target=snake_case__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
__UpperCAmelCase =shutil.rmtree
__UpperCAmelCase =os.rmdir
__UpperCAmelCase =os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
__UpperCAmelCase ={}
with swallow_io():
with time_limit(snake_case__ ):
exec(snake_case__ , snake_case__ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
__UpperCAmelCase =rmtree
__UpperCAmelCase =rmdir
__UpperCAmelCase =chdir
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Tuple:
def signal_handler(snake_case__ , snake_case__ ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , snake_case__ )
signal.signal(signal.SIGALRM , snake_case__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__UpperCAmelCase =WriteOnlyStringIO()
with contextlib.redirect_stdout(snake_case__ ):
with contextlib.redirect_stderr(snake_case__ ):
with redirect_stdin(snake_case__ ):
yield
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( ) -> Dict:
with tempfile.TemporaryDirectory() as dirname:
with chdir(snake_case__ ):
yield dirname
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
pass
class _SCREAMING_SNAKE_CASE ( io.StringIO ):
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
raise OSError
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
raise OSError
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
raise OSError
def A__ (self , *UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return False
class _SCREAMING_SNAKE_CASE ( contextlib._RedirectStream ): # type: ignore
a_ : Dict = '''stdin'''
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[str]:
if root == ".":
yield
return
__UpperCAmelCase =os.getcwd()
os.chdir(snake_case__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__=None ) -> Tuple:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
__UpperCAmelCase =None
__UpperCAmelCase =None
import os
__UpperCAmelCase ='''1'''
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
import shutil
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
import subprocess
__UpperCAmelCase =None # type: ignore
__UpperCAmelCase =None
import sys
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =None
| 132 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
a__ : Dict = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = "blip_text_model"
def __init__( self : int , lowerCAmelCase : Dict=3_05_24 , lowerCAmelCase : Dict=7_68 , lowerCAmelCase : Dict=7_68 , lowerCAmelCase : Union[str, Any]=30_72 , lowerCAmelCase : Any=7_68 , lowerCAmelCase : Dict=12 , lowerCAmelCase : int=8 , lowerCAmelCase : List[Any]=5_12 , lowerCAmelCase : str="gelu" , lowerCAmelCase : Union[str, Any]=1E-1_2 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : int=0.02 , lowerCAmelCase : str=3_05_22 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : str=0 , lowerCAmelCase : Optional[Any]=1_02 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Dict=True , **lowerCAmelCase : List[str] , ) -> List[str]:
"""simple docstring"""
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , sep_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = encoder_hidden_size
lowercase__ = intermediate_size
lowercase__ = projection_dim
lowercase__ = hidden_dropout_prob
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = max_position_embeddings
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
lowercase__ = initializer_range
lowercase__ = attention_probs_dropout_prob
lowercase__ = is_decoder
lowercase__ = use_cache
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : List[Any]) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
lowercase__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Dict = "blip_vision_model"
def __init__( self : Any , lowerCAmelCase : Union[str, Any]=7_68 , lowerCAmelCase : int=30_72 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : List[Any]=12 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Optional[Any]=3_84 , lowerCAmelCase : Any=16 , lowerCAmelCase : str="gelu" , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : List[str]=1E-1_0 , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = projection_dim
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
@classmethod
def UpperCAmelCase ( cls : str , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Dict) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
lowercase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "blip"
A : str = True
def __init__( self : Optional[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : Tuple=2.65_92 , lowerCAmelCase : List[Any]=2_56 , **lowerCAmelCase : str , ) -> int:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
if text_config is None:
lowercase__ = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
lowercase__ = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
lowercase__ = BlipTextConfig(**lowerCAmelCase)
lowercase__ = BlipVisionConfig(**lowerCAmelCase)
lowercase__ = self.vision_config.hidden_size
lowercase__ = projection_dim
lowercase__ = logit_scale_init_value
lowercase__ = 1.0
lowercase__ = 0.02
lowercase__ = image_text_hidden_size
@classmethod
def UpperCAmelCase ( cls : Dict , lowerCAmelCase : BlipTextConfig , lowerCAmelCase : BlipVisionConfig , **lowerCAmelCase : Dict) -> str:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__)
lowercase__ = self.text_config.to_dict()
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 642 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642 | 1 |
'''simple docstring'''
from __future__ import annotations
a : Optional[Any] = '''Muhammad Umer Farooq'''
a : int = '''MIT'''
a : Dict = '''1.0.0'''
a : Optional[int] = '''Muhammad Umer Farooq'''
a : Optional[Any] = '''[email protected]'''
a : Union[str, Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Optional[Any] , a_ : str ):
"""simple docstring"""
super().__init__()
__snake_case = []
__snake_case = domain
def A ( self : str , a_ : str , a_ : list[tuple[str, str | None]] ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case = parse.urljoin(self.domain , a_ )
self.urls.append(a_ )
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
return ".".join(get_sub_domain_name(_UpperCAmelCase ).split("." )[-2:] )
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
return parse.urlparse(_UpperCAmelCase ).netloc
def __UpperCAmelCase ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
__snake_case = get_domain_name(_UpperCAmelCase )
# Initialize the parser
__snake_case = Parser(_UpperCAmelCase )
try:
# Open URL
__snake_case = requests.get(_UpperCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case = requests.get(_UpperCAmelCase )
# Get the valid email.
__snake_case = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_UpperCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_UpperCAmelCase )
if __name__ == "__main__":
a : Any = emails_from_url('''https://github.com''')
print(F'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
| 69 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['''OwlViTFeatureExtractor''']
a = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a = 2
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
_UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : List[Any]="<unk>" , _UpperCAmelCase : List[str]=None , ):
_A , _A , _A , _A = bos, unk, pad, eos
_A = []
_A = []
_A = {}
_A = self.add_symbol(_UpperCAmelCase )
_A = self.add_symbol(_UpperCAmelCase )
_A = self.add_symbol(_UpperCAmelCase )
_A = self.add_symbol(_UpperCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_UpperCAmelCase )
_A = len(self.symbols )
def __eq__( self : int , _UpperCAmelCase : Optional[Any] ):
return self.indices == other.indices
def __getitem__( self : List[str] , _UpperCAmelCase : str ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : int ):
return len(self.symbols )
def __contains__( self : Union[str, Any] , _UpperCAmelCase : str ):
return sym in self.indices
@classmethod
def lowerCAmelCase_ ( cls : str , _UpperCAmelCase : Tuple ):
_A = cls()
d.add_from_file(_UpperCAmelCase )
return d
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any=1 , _UpperCAmelCase : Tuple=False ):
if word in self.indices and not overwrite:
_A = self.indices[word]
_A = self.count[idx] + n
return idx
else:
_A = len(self.symbols )
_A = idx
self.symbols.append(_UpperCAmelCase )
self.count.append(_UpperCAmelCase )
return idx
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple ):
return 0
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[int] ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(_UpperCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(_UpperCAmelCase ) )
return
_A = f.readlines()
_A = self._load_meta(_UpperCAmelCase )
for line in lines[indices_start_line:]:
try:
_A , _A = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
_A = True
_A , _A = line.rsplit(' ' , 1 )
else:
_A = False
_A = int(_UpperCAmelCase )
_A = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(_UpperCAmelCase ) )
self.add_symbol(_UpperCAmelCase , n=_UpperCAmelCase , overwrite=_UpperCAmelCase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def _snake_case ( _snake_case : int ) -> Optional[Any]:
'''simple docstring'''
_A = dict((re.sub(R'@@$' , '' , _snake_case ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _snake_case ), v) for k, v in d.items() )
_A = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
_A = d[k] # restore
return da
def _snake_case ( _snake_case : List[Any] , _snake_case : Tuple ) -> List[Any]:
'''simple docstring'''
if not os.path.exists(_snake_case ):
raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_A = os.path.join(_snake_case , 'checkpoint.pt' )
if not os.path.isfile(_snake_case ):
raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' )
_A = torch.load(_snake_case , map_location='cpu' )
_A = chkpt['cfg']['model']
# dicts
_A = os.path.join(_snake_case , 'dict.txt' )
if not os.path.isfile(_snake_case ):
raise ValueError(F'''path to the file {dict_file} does not exist!''' )
_A = Dictionary.load(_snake_case )
_A = rewrite_dict_keys(src_dict.indices )
_A = len(_snake_case )
_A = os.path.join(_snake_case , VOCAB_FILES_NAMES['vocab_file'] )
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) )
# merges_file (bpecodes)
_A = os.path.join(_snake_case , 'bpecodes' )
if not os.path.isfile(_snake_case ):
raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' )
_A = os.path.join(_snake_case , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(_snake_case , _snake_case )
# model config
_A = os.path.join(_snake_case , 'config.json' )
_A = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'''Generating {biogpt_model_config_file}''' )
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) )
# tokenizer config
_A = os.path.join(_snake_case , _snake_case )
_A = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'''Generating {biogpt_tokenizer_config_file}''' )
with open(_snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) )
# model
_A = chkpt['model']
# remove unneeded keys
_A = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(_snake_case , _snake_case )
_A = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
_A = model_state_dict.pop(_snake_case )
else:
_A = model_state_dict.pop(_snake_case )
_A = BioGptConfig.from_pretrained(_snake_case )
_A = BioGptForCausalLM(_snake_case )
# check that it loads ok
model_new.load_state_dict(_snake_case )
# save
_A = os.path.join(_snake_case , _snake_case )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(_snake_case , _snake_case )
print('Conversion is done!' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 505 | 0 |
'''simple docstring'''
def __UpperCamelCase ( a : list , a : int = 0 ) ->Optional[Any]:
snake_case = length or len(lowerCAmelCase_ )
snake_case = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
snake_case , snake_case = list_data[i + 1], list_data[i]
snake_case = True
return list_data if not swapped else bubble_sort(lowerCAmelCase_ , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
a_ = """pixel_values"""
a_ = False
a_ = TimmBackboneConfig
def __init__( self : Tuple , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
requires_backends(self , 'timm' )
super().__init__(lowerCAmelCase_ )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCAmelCase_ , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__lowerCAmelCase = getattr(lowerCAmelCase_ , 'use_pretrained_backbone' , lowerCAmelCase_ )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(lowerCAmelCase_ , 'out_indices' , lowerCAmelCase_ ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=lowerCAmelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCAmelCase_ , **lowerCAmelCase_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer['module']: str(lowerCAmelCase_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCAmelCase_ )
@classmethod
def lowercase ( cls : int , lowerCAmelCase_ : Dict , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop('config' , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop('use_timm_backbone' , lowerCAmelCase_ )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__lowerCAmelCase = kwargs.pop('num_channels' , config.num_channels )
__lowerCAmelCase = kwargs.pop('features_only' , config.features_only )
__lowerCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop('out_indices' , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=lowerCAmelCase_ , num_channels=lowerCAmelCase_ , features_only=lowerCAmelCase_ , use_pretrained_backbone=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , )
return super()._from_config(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : int ) -> Dict:
pass
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(lowerCAmelCase_ )
__lowerCAmelCase = tuple(lowerCAmelCase_ ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , attentions=lowerCAmelCase_ )
| 53 | 0 |
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Union[str, Any], snake_case :Optional[int]):
"""simple docstring"""
_lowercase =arr.split(',')
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =[int(self.array[0])] * len(self.array)
_lowercase =[int(self.array[0])] * len(self.array)
for i in range(1, len(self.array)):
_lowercase =max(
int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
_lowercase =max(sum_value[i], rear[i - 1])
return rear[len(self.array) - 1]
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("please input some numbers:")
_SCREAMING_SNAKE_CASE = SubArray(whole_array)
_SCREAMING_SNAKE_CASE = array.solve_sub_array()
print(("the results is:", re))
| 713 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
def __init__( self :str, *snake_case :Any, **snake_case :Tuple):
"""simple docstring"""
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.', snake_case, )
super().__init__(*snake_case, **snake_case)
| 557 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
SCREAMING_SNAKE_CASE_: Any =logging.getLogger(__name__)
SCREAMING_SNAKE_CASE_: Any ={
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : List[Any] = """bertabs"""
def __init__(self : Any , __a : int=30522 , __a : Tuple=512 , __a : Tuple=6 , __a : Dict=512 , __a : int=8 , __a : List[Any]=512 , __a : List[str]=0.2 , __a : List[Any]=6 , __a : int=768 , __a : Any=8 , __a : Dict=2048 , __a : Tuple=0.2 , **__a : Optional[int] , ):
super().__init__(**__a )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_pos
UpperCAmelCase_ = enc_layers
UpperCAmelCase_ = enc_hidden_size
UpperCAmelCase_ = enc_heads
UpperCAmelCase_ = enc_ff_size
UpperCAmelCase_ = enc_dropout
UpperCAmelCase_ = dec_layers
UpperCAmelCase_ = dec_hidden_size
UpperCAmelCase_ = dec_heads
UpperCAmelCase_ = dec_ff_size
UpperCAmelCase_ = dec_dropout
| 78 |
"""simple docstring"""
def _snake_case ( lowercase__ = 1 , lowercase__ = 1000 ):
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : List[Any] = 0
for divide_by_number in range(lowercase__ , digit + 1 ):
_lowerCamelCase : list[int] = []
_lowerCamelCase : Dict = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(lowercase__ ):
_lowerCamelCase : Any = len(lowercase__ )
_lowerCamelCase : Any = divide_by_number
else:
has_been_divided.append(lowercase__ )
_lowerCamelCase : int = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 630 | 0 |
'''simple docstring'''
def a_ ( __UpperCAmelCase ) -> Dict:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( __UpperCAmelCase ) -> list[tuple[int, int]]:
"""simple docstring"""
snake_case: Optional[int] =0
snake_case: Dict =len(__UpperCAmelCase ) # No of vertices in graph
snake_case: Optional[int] =[0] * n
snake_case: str =[False] * n
def dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
snake_case: str =True
snake_case: Optional[int] =id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , id_ )
snake_case: Tuple =min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case: List[Any] =min(low[at] , low[to] )
snake_case: list[tuple[int, int]] =[]
for i in range(__UpperCAmelCase ):
if not visited[i]:
dfs(__UpperCAmelCase , -1 , __UpperCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a = logging.getLogger(__name__)
class a_ ( snake_case ):
UpperCAmelCase : Any = """sequence-classification"""
def __init__( self : int , a_ : str ) -> str:
if type(a_ ) == dict:
snake_case: List[Any] =Namespace(**a_ )
snake_case: Tuple =glue_output_modes[hparams.task]
snake_case: Any =glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def UpperCamelCase ( self : Tuple , **a_ : Tuple ) -> Union[str, Any]:
return self.model(**a_ )
def UpperCamelCase ( self : int , a_ : Union[str, Any] , a_ : Optional[int] ) -> Optional[int]:
snake_case: Any ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Optional[int] =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: Optional[int] =self(**a_ )
snake_case: Any =outputs[0]
snake_case: Union[str, Any] =self.trainer.lr_schedulers[0]['scheduler']
snake_case: str ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase ( self : str ) -> Tuple:
snake_case: int =self.hparams
snake_case: Union[str, Any] =processors[args.task]()
snake_case: Union[str, Any] =processor.get_labels()
for mode in ["train", "dev"]:
snake_case: Optional[Any] =self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , a_ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
snake_case: int =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
snake_case: Tuple =convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , a_ )
torch.save(a_ , a_ )
def UpperCamelCase ( self : List[Any] , a_ : str , a_ : int , a_ : bool = False ) -> DataLoader:
snake_case: List[Any] ='dev' if mode == 'test' else mode
snake_case: Union[str, Any] =self._feature_file(a_ )
logger.info('Loading features from cached file %s' , a_ )
snake_case: Dict =torch.load(a_ )
snake_case: Union[str, Any] =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case: List[Any] =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
snake_case: str =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
snake_case: Optional[Any] =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
snake_case: Union[str, Any] =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def UpperCamelCase ( self : List[str] , a_ : Optional[int] , a_ : Any ) -> Dict:
snake_case: int ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
snake_case: Tuple =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
snake_case: List[str] =self(**a_ )
snake_case , snake_case: str =outputs[:2]
snake_case: Any =logits.detach().cpu().numpy()
snake_case: Union[str, Any] =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase ( self : int , a_ : Union[str, Any] ) -> tuple:
snake_case: Optional[Any] =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
snake_case: str =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
snake_case: Union[str, Any] =np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
snake_case: Optional[Any] =np.squeeze(a_ )
snake_case: Tuple =np.concatenate([x['target'] for x in outputs] , axis=0 )
snake_case: Any =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: str =[[] for _ in range(out_label_ids.shape[0] )]
snake_case: int ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
snake_case: Union[str, Any] =dict(results.items() )
snake_case: Dict =results
return ret, preds_list, out_label_list
def UpperCamelCase ( self : str , a_ : list ) -> dict:
snake_case , snake_case , snake_case: Union[str, Any] =self._eval_end(a_ )
snake_case: Optional[Any] =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase ( self : Tuple , a_ : Tuple ) -> dict:
snake_case , snake_case , snake_case: int =self._eval_end(a_ )
snake_case: List[Any] =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase ( a_ : Optional[int] , a_ : Dict ) -> Tuple:
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=a_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=a_ , required=a_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=a_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ) -> Any:
"""simple docstring"""
snake_case: Tuple =argparse.ArgumentParser()
add_generic_args(__UpperCAmelCase , os.getcwd() )
snake_case: List[Any] =GLUETransformer.add_model_specific_args(__UpperCAmelCase , os.getcwd() )
snake_case: Optional[int] =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
snake_case: Optional[int] =os.path.join(
'./results' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
snake_case: str =GLUETransformer(__UpperCAmelCase )
snake_case: Tuple =generic_train(__UpperCAmelCase , __UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
snake_case: str =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__UpperCAmelCase ) )
snake_case: int =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 347 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = StableDiffusionSAGPipeline
_lowerCamelCase = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = False
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a = CLIPTextModel(__A )
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case_ ( self , __A , __A=0 ):
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
__a = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
__a = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
__a = """."""
__a = torch.manual_seed(0 )
__a = sag_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
__a = output.images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def snake_case_ ( self ):
__a = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__a = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
__a = """."""
__a = torch.manual_seed(0 )
__a = sag_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
__a = output.images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def snake_case_ ( self ):
__a = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__a = sag_pipe.to(__A )
sag_pipe.set_progress_bar_config(disable=__A )
__a = """."""
__a = torch.manual_seed(0 )
__a = sag_pipe(
[prompt] , width=768 , height=512 , generator=__A , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
__a = output.images
assert image.shape == (1, 512, 768, 3)
| 99 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 | 1 |
'''simple docstring'''
import math
def lowerCAmelCase ( snake_case__ : int )-> str:
A_ = 0
A_ = 0
while num > 0:
A_ = num % 8
A_ = octal + (remainder * math.floor(math.pow(10 , _lowercase ) ))
counter += 1
A_ = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return f'0o{int(_lowercase )}'
def lowerCAmelCase ( )-> None:
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 709 |
from __future__ import annotations
__magic_name__ : List[Any] = 8.9_8_8e9 # units = N * m^s * C^-2
def lowerCAmelCase ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float )-> dict[str, float]:
A_ = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if distance < 0:
raise ValueError("Distance cannot be negative" )
if force == 0:
A_ = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
A_ = abs(snake_case__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
A_ = abs(snake_case__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
A_ = (COULOMBS_CONSTANT * charge_product / abs(snake_case__ )) ** 0.5
return {"distance": distance}
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 608 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=False , __a=False , __a=False , __a=2 , __a=99 , __a=0 , __a=32 , __a=5 , __a=4 , __a=0.1 , __a=0.1 , __a=5_12 , __a=12 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a="last" , __a=None , __a=None , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , 2).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaubertModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , lengths=__a , langs=__a)
_UpperCamelCase = model(__a , langs=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = FlaubertWithLMHeadModel(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , token_type_ids=__a , labels=__a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = model(__a , start_positions=__a , end_positions=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FlaubertForQuestionAnswering(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
_UpperCamelCase = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(__a , start_positions=__a , end_positions=__a)
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = FlaubertForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> int:
'''simple docstring'''
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase__ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self , __a , __a , __a=False) -> Dict:
'''simple docstring'''
_UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a)
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a)
return inputs_dict
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = FlaubertModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , emb_dim=37)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__a)
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@slow
@require_torch_gpu
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=__a)
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = torch.jit.trace(
__a , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , '''traced_model.pt'''))
_UpperCamelCase = torch.jit.load(os.path.join(__a , '''traced_model.pt''') , map_location=__a)
loaded(inputs_dict['''input_ids'''].to(__a) , inputs_dict['''attention_mask'''].to(__a))
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''')
_UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
_UpperCamelCase = model(__a)[0]
_UpperCamelCase = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
| 19 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
a__ = None
a__ = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
a__ = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def A__ (snake_case : str , snake_case : Optional[Any]=1 , snake_case : Optional[int]=2_56 ) -> Dict:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def A__ (snake_case : Dict ) -> Optional[Any]:
with open(snake_case , """r""" ) as f:
return json.load(snake_case )
def A__ (snake_case : List[Any] , snake_case : Any ) -> Dict:
with open(snake_case , """w""" ) as f:
json.dump(snake_case , snake_case )
def A__ (snake_case : Tuple , snake_case : List[Any] , snake_case : List[str] , snake_case : str=True ) -> int:
os.makedirs(snake_case , exist_ok=snake_case )
__UpperCamelCase : Optional[int] = os.path.join(snake_case , """tmp""" )
os.makedirs(snake_case , exist_ok=snake_case )
__UpperCamelCase : Any = read_json(os.path.join(snake_case , """params.json""" ) )
__UpperCamelCase : Dict = NUM_SHARDS[model_size]
__UpperCamelCase : List[Any] = params["""n_layers"""]
__UpperCamelCase : Any = params["""n_heads"""]
__UpperCamelCase : Optional[int] = n_heads // num_shards
__UpperCamelCase : Tuple = params["""dim"""]
__UpperCamelCase : Optional[int] = dim // n_heads
__UpperCamelCase : Dict = 10000.0
__UpperCamelCase : str = 1.0 / (base ** (torch.arange(0 , snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__UpperCamelCase : Any = params["""n_kv_heads"""] # for GQA / MQA
__UpperCamelCase : Optional[int] = n_heads_per_shard // num_key_value_heads
__UpperCamelCase : List[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
__UpperCamelCase : Optional[Any] = n_heads
__UpperCamelCase : Any = n_heads_per_shard
__UpperCamelCase : str = dim
# permute for sliced rotary
def permute(snake_case : Tuple , snake_case : Dict=n_heads , snake_case : str=dim , snake_case : Optional[Any]=dim ):
return w.view(snake_case , dima // n_heads // 2 , 2 , snake_case ).transpose(1 , 2 ).reshape(snake_case , snake_case )
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__UpperCamelCase : Any = torch.load(os.path.join(snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
__UpperCamelCase : Optional[int] = [
torch.load(os.path.join(snake_case , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" )
for i in range(snake_case )
]
__UpperCamelCase : Optional[int] = 0
__UpperCamelCase : List[str] = {"""weight_map""": {}}
for layer_i in range(snake_case ):
__UpperCamelCase : Tuple = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCamelCase : Optional[Any] = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__UpperCamelCase : int = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__UpperCamelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case ) )
__UpperCamelCase : int = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case ) , snake_case , snake_case , snake_case , )
__UpperCamelCase : Dict = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
snake_case , snake_case , snake_case )
for i in range(snake_case )
] , dim=0 , ).reshape(snake_case , snake_case )
__UpperCamelCase : str = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case )] , dim=1 )
__UpperCamelCase : Dict = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case )] , dim=0 )
__UpperCamelCase : Dict = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case )] , dim=1 )
__UpperCamelCase : List[str] = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case )] , dim=0 )
__UpperCamelCase : Dict = inv_freq
for k, v in state_dict.items():
__UpperCamelCase : str = filename
param_count += v.numel()
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
__UpperCamelCase : Dict = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCamelCase : Dict = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
__UpperCamelCase : Dict = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
__UpperCamelCase : str = filename
param_count += v.numel()
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
# Write configs
__UpperCamelCase : Union[str, Any] = {"""total_size""": param_count * 2}
write_json(snake_case , os.path.join(snake_case , """pytorch_model.bin.index.json""" ) )
__UpperCamelCase : str = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
__UpperCamelCase : str = params["""multiple_of"""] if """multiple_of""" in params else 2_56
__UpperCamelCase : str = LlamaConfig(
hidden_size=snake_case , intermediate_size=compute_intermediate_size(snake_case , snake_case , snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=snake_case , )
config.save_pretrained(snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
__UpperCamelCase : Optional[int] = LlamaForCausalLM.from_pretrained(snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(snake_case , safe_serialization=snake_case )
shutil.rmtree(snake_case )
def A__ (snake_case : Any , snake_case : str ) -> Union[str, Any]:
# Initialize the tokenizer based on the `spm` model
__UpperCamelCase : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
__UpperCamelCase : Tuple = tokenizer_class(snake_case )
tokenizer.save_pretrained(snake_case )
def A__ () -> int:
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=snake_case , help="""Whether or not to save using `safetensors`.""" )
__UpperCamelCase : str = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__UpperCamelCase : int = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , snake_case )
if __name__ == "__main__":
main()
| 279 | 0 |
class __magic_name__ :
def __init__( self : Optional[Any] ,__SCREAMING_SNAKE_CASE : List[str] ):
UpperCAmelCase = n
UpperCAmelCase = [None] * self.n
UpperCAmelCase = 0 # index of the first element
UpperCAmelCase = 0
UpperCAmelCase = 0
def __len__( self : Dict ):
return self.size
def _UpperCAmelCase ( self : Dict ):
return self.size == 0
def _UpperCAmelCase ( self : List[Any] ):
return False if self.is_empty() else self.array[self.front]
def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Optional[int] ):
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
UpperCAmelCase = data
UpperCAmelCase = (self.rear + 1) % self.n
self.size += 1
return self
def _UpperCAmelCase ( self : Optional[Any] ):
if self.size == 0:
raise Exception("UNDERFLOW" )
UpperCAmelCase = self.array[self.front]
UpperCAmelCase = None
UpperCAmelCase = (self.front + 1) % self.n
self.size -= 1
return temp
| 707 |
def __UpperCamelCase ( _lowerCAmelCase = 10 ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or n < 0:
raise ValueError("Invalid input" )
UpperCAmelCase = 10**n
UpperCAmelCase = 2_84_33 * (pow(2 , 7_83_04_57 , _lowerCAmelCase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(10) = }")
| 405 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class _lowercase ( __lowercase ):
def __init__( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 56 |
def __snake_case ( _lowerCAmelCase : list , _lowerCAmelCase : list , _lowerCAmelCase : int ) -> int:
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
A_ : Dict = [p / w for p, w in zip(_lowerCAmelCase , _lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
A_ : Optional[Any] = sorted(_lowerCAmelCase )
# declaring useful variables
A_ : List[Any] = len(_lowerCAmelCase )
A_ : List[Any] = 0
A_ : Any = 0
A_ : List[Any] = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
A_ : List[Any] = sorted_profit_by_weight[length - i - 1]
A_ : List[Any] = profit_by_weight.index(_lowerCAmelCase )
A_ : List[Any] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
_lowerCAmelCase : int = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
_lowerCAmelCase : Dict = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
_lowerCAmelCase : Union[str, Any] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 454 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Dict = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 709 |
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] | None = None ):
__lowercase : Tuple = word_bank or []
# create a table
__lowercase : int = len(lowerCAmelCase_ ) + 1
__lowercase : list[list[list[str]]] = []
for _ in range(lowerCAmelCase_ ):
table.append([] )
# seed value
__lowercase : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase_ )] == word:
__lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCAmelCase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase_ )]:
combination.reverse()
return table[len(lowerCAmelCase_ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 649 | 0 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.