| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-54.1k | int64 0-699 | stringlengths 111-35.6k | int64 0-699 | int64 0-1 |
def abbr(a: str, b: str) -> bool:
    """
    Return True if some lowercase letters of `a` can be capitalized and the
    remaining lowercase letters deleted so that `a` becomes `b`.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
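For clarity, a short usage sketch of the restored config class (the override values here are illustrative, not defaults):

```python
config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8)
print(config.model_type)   # megatron-bert
print(config.hidden_size)  # 512
```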
| 51 | 0 |
'''simple docstring'''
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Return the fixed monthly payment (EMI) for a loan."""
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
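A quick numeric sanity check of the formula (illustrative inputs; the expected value was worked out by hand and is approximate):

```python
# borrow 25,000 at 8% per annum over 3 years -> 36 monthly payments
emi = equated_monthly_installments(25_000, 0.08, 3)
print(f"{emi:.2f}")  # ~783.41
```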
| 41 |
'''simple docstring'''
from __future__ import annotations
graph: dict[str, list[str]] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
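Traced by hand against the adjacency list above, the three calls are expected to behave as follows:

```python
# g.shortest_path('D')   -> 'G->C->A->B->D'
# g.shortest_path('G')   -> 'G'
# g.shortest_path('Foo') -> raises ValueError (no path from 'G' to 'Foo')
```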
| 51 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ = None ) -> str:
'''simple docstring'''
lowerCamelCase_ = (
os.path.join(SCREAMING_SNAKE_CASE_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase_ = Extractor
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase_ = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
lowerCamelCase_ = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output_path
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
@abstractmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
...
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
lowerCamelCase_ = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase_ = cls.read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
def resolved(SCREAMING_SNAKE_CASE_ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase_ = resolved(os.path.join(SCREAMING_SNAKE_CASE_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x1F\x8B']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
with gzip.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as fp:
lowerCamelCase_ = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase_ = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
lowerCamelCase_ = struct.unpack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
lowerCamelCase_ = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as ifh, open(SCREAMING_SNAKE_CASE_ , 'wb' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x42\x5A\x68']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
with bz2.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import py7zr
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = [b'\x04\x22\x4D\x18']
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lz4.frame
with lz4.frame.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase( cls ) -> Union[str, Any]:
'''simple docstring'''
return max(
len(SCREAMING_SNAKE_CASE_ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_ , magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> bool:
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
lowerCamelCase_ = cls._get_magic_number_max_length()
lowerCamelCase_ = cls._read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def UpperCamelCase( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ) , exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
lowerCamelCase_ = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('.lock' ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ , ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase_ = extractor if extractor != 'deprecated' else extractor_format
else:
lowerCamelCase_ = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=SCREAMING_SNAKE_CASE_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
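The central pattern of this module is magic-number sniffing: read the first bytes of a file and match them against known signatures, with tar handled separately via `tarfile.is_tarfile` (its extractor declares no magic numbers above). A minimal self-contained sketch of that idea, not the datasets API; the signatures are the ones listed in the classes above:

```python
from typing import Optional

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd\x37\x7a\x58\x5a\x00"],
    "rar": [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
    "bz2": [b"\x42\x5a\x68"],
    "7z": [b"\x37\x7a\xbc\xaf\x27\x1c"],
    "lz4": [b"\x04\x22\x4d\x18"],
}


def infer_format(path: str) -> Optional[str]:
    """Return the first format whose magic number matches the file header."""
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        header = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(header.startswith(m) for m in magics):
            return fmt
    return None  # no match; the real module falls back to tarfile.is_tarfile
```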
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
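For context, `_LazyModule` defers the heavy submodule imports until an exported name is first touched. A rough, simplified sketch of that mechanism (an assumption-laden illustration, not the transformers implementation):

```python
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Resolve exported names to their submodules only on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # exported attribute -> submodule that actually defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value
```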
| 51 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
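One detail worth noting: `attribute_map` lets callers use the generic transformers names while the config stores GPT-2 style names. Illustrative usage (values are hypothetical):

```python
config = GPTBigCodeConfig(n_embd=2048, n_layer=24)
print(config.hidden_size)        # 2048, resolved through attribute_map to n_embd
print(config.num_hidden_layers)  # 24
print(config.multi_query)        # True -> one shared key/value head across queries
```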
| 43 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Dict,__A : Optional[int]=1_3,__A : Optional[Any]=3_0,__A : Any=2,__A : Any=3,__A : List[Any]=True,__A : List[str]=True,__A : List[str]=3_2,__A : List[str]=2,__A : List[Any]=4,__A : int=3_7,__A : Any="gelu",__A : Optional[Any]=0.1,__A : List[Any]=0.1,__A : Optional[int]=1_0,__A : Optional[int]=0.02,__A : Any=3,__A : Optional[Any]=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : int = is_training
_lowerCamelCase : int = use_labels
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Dict = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Union[str, Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Optional[Any] = num_patches + 1
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Dict = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Tuple ):
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__A,initializer_range=self.initializer_range,)
def lowerCamelCase_ ( self : Any,__A : Any,__A : Dict,__A : Any ):
_lowerCamelCase : int = TFViTModel(config=__A )
_lowerCamelCase : Dict = model(__A,training=__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowerCamelCase : Dict = self.image_size // 2
_lowerCamelCase : Any = pixel_values[:, :, :image_size, :image_size]
_lowerCamelCase : int = model(__A,interpolate_pos_encoding=__A,training=__A )
_lowerCamelCase : int = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Union[str, Any],__A : Any,__A : List[str],__A : Optional[Any] ):
_lowerCamelCase : Optional[int] = self.type_sequence_label_size
_lowerCamelCase : Dict = TFViTForImageClassification(__A )
_lowerCamelCase : Any = model(__A,labels=__A,training=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowerCamelCase : Union[str, Any] = self.image_size // 2
_lowerCamelCase : int = pixel_values[:, :, :image_size, :image_size]
_lowerCamelCase : Any = model(__A,interpolate_pos_encoding=__A,training=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Dict = TFViTForImageClassification(__A )
_lowerCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Any = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = config_and_inputs
_lowerCamelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCAmelCase_ = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = TFViTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=__A,has_text_modality=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A,tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__A )
_lowerCamelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Tuple ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=__A,return_tensors="tf" )
# forward pass
_lowerCamelCase : Any = model(**__A )
# verify the logits
_lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[int] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3],__A,atol=1e-4 )
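The sequence-length bookkeeping in the tester follows from ViT's patch embedding: an image of side `image_size` cut into `patch_size`-sided patches yields `(image_size // patch_size) ** 2` tokens plus one [CLS] token. With the tester's defaults:

```python
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2     # 15 * 15 = 225
seq_length = num_patches + 1                      # 226 with the [CLS] token

# the interpolate_pos_encoding checks halve the image first:
half = image_size // 2                            # 15
seq_length_half = (half // patch_size) ** 2 + 1   # 7 * 7 + 1 = 50
```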
| 44 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits in the number factorial(num)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 51 | 0 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
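A quick numeric check of the functions above (each value is verifiable by hand):

```python
assert binomial_coefficient(10, 5) == 252
assert catalan_number(5) == 42        # 252 // 6
assert factorial(5) == 120
assert binary_tree_count(5) == 5040   # 42 shapes * 120 labelings
```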
| 45 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
UpperCAmelCase = 32
UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
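For orientation, a minimal usage sketch assembled from the slow tests above (the checkpoint id and all calls appear in the tests; treat the exact invocation as illustrative):

```python
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
)
# the same memory savers the slow tests rely on
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()

generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe("anime turtle", generator=generator, output_type="np").images[0]
```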
| 51 | 0 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class A_ ( nn.Module ):
def __init__( self: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any]=0.0 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: str = "geglu" ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: str = "layer_norm" ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : int = only_cross_attention
_lowerCamelCase : Tuple = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_lowerCamelCase : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCamelCase : Tuple = AdaLayerNorm(__lowerCAmelCase ,__lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
_lowerCamelCase : Union[str, Any] = AdaLayerNormZero(__lowerCAmelCase ,__lowerCAmelCase )
else:
_lowerCamelCase : Dict = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Attention(
query_dim=__lowerCAmelCase ,heads=__lowerCAmelCase ,dim_head=__lowerCAmelCase ,dropout=__lowerCAmelCase ,bias=__lowerCAmelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=__lowerCAmelCase ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowerCamelCase : int = (
AdaLayerNorm(__lowerCAmelCase ,__lowerCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
)
_lowerCamelCase : Union[str, Any] = Attention(
query_dim=__lowerCAmelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=__lowerCAmelCase ,dim_head=__lowerCAmelCase ,dropout=__lowerCAmelCase ,bias=__lowerCAmelCase ,upcast_attention=__lowerCAmelCase ,) # is self-attn if encoder_hidden_states is none
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Any = None
# 3. Feed-forward
_lowerCamelCase : Tuple = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
_lowerCamelCase : Any = FeedForward(__lowerCAmelCase ,dropout=__lowerCAmelCase ,activation_fn=__lowerCAmelCase ,final_dropout=__lowerCAmelCase )
# let chunk size default to None
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = 0
def _lowercase ( self: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Any = chunk_size
_lowerCamelCase : Dict = dim
def _lowercase ( self: Tuple ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.FloatTensor] = None ,__lowerCAmelCase: Optional[torch.LongTensor] = None ,__lowerCAmelCase: Dict[str, Any] = None ,__lowerCAmelCase: Optional[torch.LongTensor] = None ,):
'''simple docstring'''
if self.use_ada_layer_norm:
_lowerCamelCase : List[str] = self.norma(__lowerCAmelCase ,__lowerCAmelCase )
elif self.use_ada_layer_norm_zero:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.norma(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,hidden_dtype=hidden_states.dtype )
else:
_lowerCamelCase : List[str] = self.norma(__lowerCAmelCase )
_lowerCamelCase : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCamelCase : str = self.attna(
__lowerCAmelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
if self.use_ada_layer_norm_zero:
_lowerCamelCase : List[Any] = gate_msa.unsqueeze(1 ) * attn_output
_lowerCamelCase : str = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCamelCase : Optional[int] = (
self.norma(__lowerCAmelCase ,__lowerCAmelCase ) if self.use_ada_layer_norm else self.norma(__lowerCAmelCase )
)
_lowerCamelCase : Optional[int] = self.attna(
__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : List[Any] = attn_output + hidden_states
# 3. Feed-forward
_lowerCamelCase : int = self.norma(__lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowerCamelCase : Optional[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_lowerCamelCase : List[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCamelCase : Union[str, Any] = torch.cat(
[self.ff(__lowerCAmelCase ) for hid_slice in norm_hidden_states.chunk(__lowerCAmelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
_lowerCamelCase : Any = self.ff(__lowerCAmelCase )
if self.use_ada_layer_norm_zero:
_lowerCamelCase : Optional[int] = gate_mlp.unsqueeze(1 ) * ff_output
_lowerCamelCase : int = ff_output + hidden_states
return hidden_states
class A_ ( nn.Module ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 4 ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: str = "geglu" ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = int(dim * mult )
_lowerCamelCase : str = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCamelCase : str = GELU(__lowerCAmelCase ,__lowerCAmelCase )
if activation_fn == "gelu-approximate":
_lowerCamelCase : Any = GELU(__lowerCAmelCase ,__lowerCAmelCase ,approximate="tanh" )
elif activation_fn == "geglu":
_lowerCamelCase : Union[str, Any] = GEGLU(__lowerCAmelCase ,__lowerCAmelCase )
elif activation_fn == "geglu-approximate":
_lowerCamelCase : Optional[int] = ApproximateGELU(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(__lowerCAmelCase )
# project dropout
self.net.append(nn.Dropout(__lowerCAmelCase ) )
# project out
self.net.append(nn.Linear(__lowerCAmelCase ,__lowerCAmelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowerCAmelCase ) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
for module in self.net:
_lowerCamelCase : int = module(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: str = "none" ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : int = approximate
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype )
def _lowercase ( self: Dict ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.proj(__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.gelu(__lowerCAmelCase )
return hidden_states
class A_ ( nn.Module ):
def __init__( self: str ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,dim_out * 2 )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.proj(__lowerCAmelCase ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(__lowerCAmelCase )
class A_ ( nn.Module ):
def __init__( self: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Optional[Any] = nn.Linear(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.proj(__lowerCAmelCase )
return x * torch.sigmoid(1.7_02 * x )
class A_ ( nn.Module ):
def __init__( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Dict = nn.Embedding(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = nn.SiLU()
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,embedding_dim * 2 )
_lowerCamelCase : Dict = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.linear(self.silu(self.emb(__lowerCAmelCase ) ) )
_lowerCamelCase, _lowerCamelCase : Tuple = torch.chunk(__lowerCAmelCase ,2 )
_lowerCamelCase : Tuple = self.norm(__lowerCAmelCase ) * (1 + scale) + shift
return x
class A_ ( nn.Module ):
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : Optional[Any] = CombinedTimestepLabelEmbeddings(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : List[Any] = nn.SiLU()
_lowerCamelCase : Union[str, Any] = nn.Linear(__lowerCAmelCase ,6 * embedding_dim ,bias=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = nn.LayerNorm(__lowerCAmelCase ,elementwise_affine=__lowerCAmelCase ,eps=1e-6 )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple=None ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.linear(self.silu(self.emb(__lowerCAmelCase ,__lowerCAmelCase ,hidden_dtype=__lowerCAmelCase ) ) )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = emb.chunk(6 ,dim=1 )
_lowerCamelCase : int = self.norm(__lowerCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class A_ ( nn.Module ):
def __init__( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: float = 1e-5 ):
'''simple docstring'''
super().__init__()
_lowerCamelCase : str = num_groups
_lowerCamelCase : int = eps
if act_fn is None:
_lowerCamelCase : List[str] = None
else:
_lowerCamelCase : List[str] = get_activation(__lowerCAmelCase )
_lowerCamelCase : Dict = nn.Linear(__lowerCAmelCase ,out_dim * 2 )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
if self.act:
_lowerCamelCase : Optional[Any] = self.act(__lowerCAmelCase )
_lowerCamelCase : int = self.linear(__lowerCAmelCase )
_lowerCamelCase : int = emb[:, :, None, None]
_lowerCamelCase, _lowerCamelCase : List[Any] = emb.chunk(2 ,dim=1 )
_lowerCamelCase : List[str] = F.group_norm(__lowerCAmelCase ,self.num_groups ,eps=self.eps )
_lowerCamelCase : List[str] = x * (1 + scale) + shift
return x
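Stripped of the adaptive-norm (AdaLayerNorm) variants, the block above is the standard pre-norm transformer pattern: self-attention, optional cross-attention, then a feed-forward network, each behind a LayerNorm and a residual add. A simplified PyTorch sketch of that skeleton (illustrative, not the diffusers class):

```python
from typing import Optional

import torch
from torch import nn


class BasicTransformerBlockSketch(nn.Module):
    def __init__(self, dim: int, num_heads: int, ff_mult: int = 4, dropout: float = 0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.self_attn = nn.MultiheadAttention(dim, num_heads, dropout=dropout, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.cross_attn = nn.MultiheadAttention(dim, num_heads, dropout=dropout, batch_first=True)
        self.norm3 = nn.LayerNorm(dim)
        self.ff = nn.Sequential(
            nn.Linear(dim, dim * ff_mult),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim * ff_mult, dim),
        )

    def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None) -> torch.Tensor:
        # 1. self-attention + residual
        h = self.norm1(x)
        x = x + self.self_attn(h, h, h, need_weights=False)[0]
        # 2. cross-attention against the conditioning sequence, if provided
        if context is not None:
            h = self.norm2(x)
            x = x + self.cross_attn(h, context, context, need_weights=False)[0]
        # 3. position-wise feed-forward + residual
        x = x + self.ff(self.norm3(x))
        return x
```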
| 46 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
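The three checkers assume a singly linked node type with `val` and `next` attributes that the snippet never defines; a minimal one for trying them out (note that `is_palindrome` mutates its list, so each call below builds a fresh one):

```python
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome(build("racecar")) is True
assert is_palindrome_stack(build("racecar")) is True
assert is_palindrome_dict(build("abca")) is False
```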
| 51 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : int = ['''pixel_values''']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = size if size is not None else {'shortest_edge': 2_5_6}
__a : str = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : int = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__a : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
__a : Dict = do_resize
__a : Union[str, Any] = size
__a : Any = resample
__a : List[Any] = do_center_crop
__a : Any = crop_size
__a : Tuple = do_rescale
__a : Optional[int] = rescale_factor
__a : Optional[int] = do_normalize
__a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
__a : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__a : int = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ )
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = do_resize if do_resize is not None else self.do_resize
__a : Optional[int] = size if size is not None else self.size
__a : int = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : Dict = resample if resample is not None else self.resample
__a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Optional[int] = crop_size if crop_size is not None else self.crop_size
__a : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__a : Optional[Any] = image_mean if image_mean is not None else self.image_mean
__a : List[Any] = image_std if image_std is not None else self.image_std
__a : int = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
__a : Any = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
__a : str = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__a : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
__a : Any = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : int = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
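# Illustrative sketch (added, not part of the original module): `preprocess` above applies
# its transforms in a fixed order -- resize to the shortest edge, center-crop, rescale,
# then normalize. The commented NumPy lines mirror the last two steps; the array shape and
# the 1/255 rescale factor are assumptions chosen for the example.
#
#     import numpy as np
#     image = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
#     image = image * (1 / 255)                                      # rescale step
#     mean, std = np.array(IMAGENET_STANDARD_MEAN), np.array(IMAGENET_STANDARD_STD)
#     image = (image - mean) / std                                   # normalize step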
| 47 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self : Tuple , image_processor : List[Any]=None , tokenizer : str=None , **kwargs : Tuple ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : Optional[Any] , text : Optional[int]=None , images : List[str]=None , return_tensors : int=None , **kwargs : Tuple ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
return self.image_processor
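# Illustrative sketch (added, not part of the original module): a typical round-trip through
# the processor above via the public transformers API. The checkpoint id and the inputs are
# assumptions chosen for the example.
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
#     image = Image.new('''RGB''', (224, 224))
#     inputs = processor(text=['''a photo of a cat'''], images=image, return_tensors='''pt''', padding=True)
#     # -> BatchEncoding with `input_ids`, `attention_mask` and `pixel_values`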
| 51 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :List[Any] = DiTPipeline
snake_case__ :List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ :List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
snake_case__ :Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ :Dict = False
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
        lowerCAmelCase__ = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__magic_name__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=__magic_name__ , )
lowerCAmelCase__ = AutoencoderKL()
lowerCAmelCase__ = DDIMScheduler()
lowerCAmelCase__ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=0 ):
"""simple docstring"""
if str(__magic_name__ ).startswith("mps" ):
lowerCAmelCase__ = torch.manual_seed(__magic_name__ )
else:
lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCAmelCase__ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowerCAmelCase__ = self.get_dummy_inputs(__magic_name__ )
lowerCAmelCase__ = pipe(**__magic_name__ ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCAmelCase__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
lowerCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__magic_name__ , 1E-3 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=__magic_name__ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowerCAmelCase__ = ["vase", "umbrella", "white shark", "white wolf"]
lowerCAmelCase__ = pipe.get_label_ids(__magic_name__ )
lowerCAmelCase__ = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowerCAmelCase__ = ["vase", "umbrella"]
lowerCAmelCase__ = pipe.get_label_ids(__magic_name__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(__magic_name__ , __magic_name__ ):
lowerCAmelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
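# Illustrative sketch (added, not part of the original module): the slow tests above exercise
# the public entry points shown here. Checkpoint id, label ids and call signature are taken
# from the tests; the label string is an assumption.
#
#     import torch
#     from diffusers import DiTPipeline
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["white shark"])
#     generator = torch.manual_seed(0)
#     images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images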
| 48 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
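# Illustrative sketch (added, not part of the original module): the integration expectation
# asserted above can be reproduced directly with the public tokenizer; the ids are the ones
# the slow test checks.
#
#     from transformers import XGLMTokenizer
#     tokenizer = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
#     assert tokenizer.encode('''Hello World!''') == [2, 31227, 4447, 35]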
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a ( self : Union[str, Any] ):
__UpperCAmelCase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__UpperCAmelCase = {
            '''input_ids''': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
__UpperCAmelCase = model(_lowercase )['''last_hidden_state''']
__UpperCAmelCase = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
__UpperCAmelCase = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
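# Illustrative sketch (added, not part of the original module): the same forward pass outside
# the test harness. The checkpoint id and input ids are the ones used above.
#
#     import tensorflow as tf
#     from transformers import TFXLMRobertaModel
#     model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''')
#     input_ids = tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32)  # "My dog is cute"
#     hidden = model(input_ids=input_ids)['''last_hidden_state''']  # shape (1, 6, 768)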
| 49 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig:
"""simple docstring"""
UpperCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
UpperCAmelCase = [800, 1_333]
UpperCAmelCase = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = 330
UpperCAmelCase = 14
UpperCAmelCase = 6
UpperCAmelCase = 1_320
elif "yolos_s" in yolos_name:
UpperCAmelCase = 384
UpperCAmelCase = 1_536
UpperCAmelCase = 12
UpperCAmelCase = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase = [800, 1_344]
UpperCAmelCase = 91
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''coco-detection-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[2] )
UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[
dim : dim * 2, :
]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
else:
UpperCAmelCase = val
return orig_state_dict
def __snake_case ( ) -> torch.Tensor:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512
UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes
UpperCAmelCase, UpperCAmelCase = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCAmelCase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCAmelCase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCAmelCase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCAmelCase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCAmelCase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
UpperCAmelCase = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
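# Illustrative sketch (added, not part of the original script): a typical invocation. The
# script filename and checkpoint path are assumptions; the flags come from the argparse
# definitions above.
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small \
#         --push_to_hub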
| 51 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : str = '▁'
UpperCamelCase : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
UpperCamelCase : Optional[Any] = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
UpperCamelCase : str = {'vinai/bartpho-syllable': 10_24}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = vocab_file
lowerCamelCase__ = monolingual_vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCamelCase__ = {}
lowerCamelCase__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ = cnt
cnt += 1
with open(_lowerCAmelCase ,"""r""" ,encoding="""utf-8""" ) as f:
for line in f.readlines():
lowerCamelCase__ = line.strip().split()[0]
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
if str(_lowerCAmelCase ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
lowerCamelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_ids_to_tokens )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.fairseq_ids_to_tokens[index]
    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        # join sentencepiece pieces and turn the ``▁`` word-boundary marker back into spaces
        out_string = """""".join(_lowerCAmelCase ).replace("""▁""" ,""" """ ).strip()
        return out_string
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_lowerCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_lowerCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
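# Illustrative sketch (added, not part of the original module): loading the tokenizer defined
# above through the public API. The checkpoint id comes from the pretrained maps above; the
# sample sentence is an assumption.
#
#     from transformers import BartphoTokenizer
#     tokenizer = BartphoTokenizer.from_pretrained('''vinai/bartpho-syllable''')
#     ids = tokenizer('''Chúng tôi là những nghiên cứu viên.''')['''input_ids''']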
| 50 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a__ : List[Any] = logging.get_logger(__name__)
a__ : int = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a__ : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a__ : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="whisper"
_lowerCamelCase =["past_key_values"]
_lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ):
UpperCAmelCase = vocab_size
UpperCAmelCase = num_mel_bins
UpperCAmelCase = d_model
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
UpperCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
UpperCAmelCase = median_filter_width
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def __snake_case ( self : List[str] ):
UpperCAmelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction='''inputs''' )
return common_inputs
def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ):
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , )
UpperCAmelCase = encoder_inputs['''input_features'''].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , a__ , a__ , a__ , a__ )
UpperCAmelCase = encoder_inputs.pop('''input_features''' )
UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def __snake_case ( self : Dict ):
return 1e-3
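# Illustrative sketch (added, not part of the original module): constructing the config
# defined above through the public API. The class is registered as `WhisperConfig`; the
# override values are assumptions.
#
#     from transformers import WhisperConfig
#     config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#     assert config.model_type == '''whisper'''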
| 51 | 0 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start`` and record cuttable edges."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Count the edges whose removal splits the tree into even-sized components."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
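# Worked trace for the sample above (subtree sizes computed by `dfs`):
#   node 3 roots {3, 4} (size 2)        -> edge (3, 1) is cuttable
#   node 6 roots {6, 8, 9, 10} (size 4) -> edge (6, 1) is cuttable
#   the root itself (size 10) is also appended, hence `len(cuts) - 1 == 2`.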
| 52 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =LEDConfig
_lowerCamelCase ={}
_lowerCamelCase ="gelu"
def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
UpperCAmelCase = global_attention_mask
return config, inputs_dict
def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ):
UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> Dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase =(
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase =True
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = TFLEDModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor( tok_lst ) -> Tuple:
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
a__ : int = 1e-4
@slow
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
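# Illustrative sketch (added, not part of the original module): a generation call the tests
# above do not cover directly. The checkpoint id is the one used in the tests; the input
# text and `max_length` are assumptions.
#
#     from transformers import LEDTokenizer, TFLEDForConditionalGeneration
#     tokenizer = LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
#     model = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''')
#     batch = tokenizer('''A long document ...''', return_tensors='''tf''')
#     summary_ids = model.generate(batch['''input_ids'''], max_length=64)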
| 51 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_snake_case : Optional[int] = logging.get_logger(__name__)
# General docstring
_snake_case : int = 'RegNetConfig'
# Base docstring
_snake_case : List[str] = 'facebook/regnet-y-040'
_snake_case : List[Any] = [1, 1088, 7, 7]
# Image classification docstring
_snake_case : List[Any] = 'facebook/regnet-y-040'
_snake_case : Union[str, Any] = 'tabby, tabby cat'
_snake_case : str = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[str] = "relu" , **lowerCAmelCase_ : Any , ) -> Tuple:
super().__init__(**lowerCAmelCase_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        __lowerCAmelCase = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        __lowerCAmelCase = tf.keras.layers.Conv2D(
            filters=lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , strides=lowerCAmelCase_ , padding='VALID' , groups=lowerCAmelCase_ , use_bias=lowerCAmelCase_ , name='convolution' , )
__lowerCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
        __lowerCAmelCase = ACT2FN[activation] if activation is not None else tf.identity
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any:
__lowerCAmelCase = self.convolution(self.padding(lowerCAmelCase_ ) )
__lowerCAmelCase = self.normalization(lowerCAmelCase_ )
__lowerCAmelCase = self.activation(lowerCAmelCase_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : RegNetConfig , **lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = config.num_channels
__lowerCAmelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = shape_list(lowerCAmelCase_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowerCAmelCase = tf.transpose(lowerCAmelCase_ , perm=(0, 2, 3, 1) )
__lowerCAmelCase = self.embedder(lowerCAmelCase_ )
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 , **lowerCAmelCase_ : Tuple ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
        __lowerCAmelCase = tf.keras.layers.Conv2D(
            filters=lowerCAmelCase_ , kernel_size=1 , strides=lowerCAmelCase_ , use_bias=lowerCAmelCase_ , name='convolution' )
__lowerCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def lowercase ( self : List[Any] , lowerCAmelCase_ : tf.Tensor , lowerCAmelCase_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(lowerCAmelCase_ ) , training=lowerCAmelCase_ )
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ) -> str:
super().__init__(**lowerCAmelCase_ )
        __lowerCAmelCase = tf.keras.layers.GlobalAveragePooling2D(keepdims=lowerCAmelCase_ , name='pooler' )
        __lowerCAmelCase = [
            tf.keras.layers.Conv2D(filters=lowerCAmelCase_ , kernel_size=1 , activation='relu' , name='attention.0' ),
            tf.keras.layers.Conv2D(filters=lowerCAmelCase_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] ) -> Optional[int]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowerCAmelCase = self.pooler(lowerCAmelCase_ )
for layer_module in self.attention:
__lowerCAmelCase = layer_module(lowerCAmelCase_ )
__lowerCAmelCase = hidden_state * pooled
return hidden_state
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Optional[int] ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = max(1 , out_channels // config.groups_width )
__lowerCAmelCase = (
TFRegNetShortCut(lowerCAmelCase_ , stride=lowerCAmelCase_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowerCAmelCase = [
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ , name='layer.2' ),
]
        __lowerCAmelCase = ACT2FN[config.hidden_act]
def lowercase ( self : List[str] , lowerCAmelCase_ : str ) -> Union[str, Any]:
__lowerCAmelCase = hidden_state
for layer_module in self.layers:
__lowerCAmelCase = layer_module(lowerCAmelCase_ )
__lowerCAmelCase = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
__lowerCAmelCase = self.activation(lowerCAmelCase_ )
return hidden_state
class TFRegNetYLayer( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : List[str] ) -> str:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = max(1 , out_channels // config.groups_width )
__lowerCAmelCase = (
TFRegNetShortCut(lowerCAmelCase_ , stride=lowerCAmelCase_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
__lowerCAmelCase = [
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowerCAmelCase_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ , name='layer.3' ),
]
        __lowerCAmelCase = ACT2FN[config.hidden_act]
def lowercase ( self : Tuple , lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]:
__lowerCAmelCase = hidden_state
for layer_module in self.layers:
__lowerCAmelCase = layer_module(lowerCAmelCase_ )
__lowerCAmelCase = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
__lowerCAmelCase = self.activation(lowerCAmelCase_ )
return hidden_state
class TFRegNetStage( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__lowerCAmelCase = [
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , name='layers.0' ),
*[layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowercase ( self : int , lowerCAmelCase_ : int ) -> str:
for layer_module in self.layers:
__lowerCAmelCase = layer_module(lowerCAmelCase_ )
return hidden_state
class TFRegNetEncoder( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase_ : RegNetConfig , **lowerCAmelCase_ : str ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
__lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCAmelCase_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ , name=f"""stages.{i+1}""" ) )
def lowercase ( self : Any , lowerCAmelCase_ : tf.Tensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
__lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
__lowerCAmelCase = stage_module(lowerCAmelCase_ )
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ )
@keras_serializable
class TFRegNetMainLayer( tf.keras.layers.Layer ):
"""simple docstring"""
    config_class = RegNetConfig
def __init__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[Any] ) -> str:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = config
__lowerCAmelCase = TFRegNetEmbeddings(lowerCAmelCase_ , name='embedder' )
__lowerCAmelCase = TFRegNetEncoder(lowerCAmelCase_ , name='encoder' )
        __lowerCAmelCase = tf.keras.layers.GlobalAveragePooling2D(keepdims=lowerCAmelCase_ , name='pooler' )
@unpack_inputs
def lowercase ( self : str , lowerCAmelCase_ : tf.Tensor , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.embedder(lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowerCAmelCase = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowerCAmelCase = encoder_outputs[0]
__lowerCAmelCase = self.pooler(lowerCAmelCase_ )
        # Change to NCHW output format to have uniformity across the modules
__lowerCAmelCase = tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2) )
__lowerCAmelCase = tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowerCAmelCase = tuple([tf.transpose(lowerCAmelCase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel( _UpperCamelCase ):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = """regnet"""
    main_input_name = """pixel_values"""
@property
def lowercase ( self : Tuple ) -> List[str]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_snake_case : List[str] = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_snake_case : Union[str, Any] = R'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _UpperCamelCase , )
class TFRegNetModel( TFRegNetPreTrainedModel ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : RegNetConfig , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> int:
super().__init__(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = TFRegNetMainLayer(lowerCAmelCase_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase ( self : int , lowerCAmelCase_ : tf.Tensor , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : List[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.regnet(
pixel_values=lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , training=lowerCAmelCase_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _UpperCamelCase , )
class TFRegNetForImageClassification( TFRegNetPreTrainedModel , _UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase_ : RegNetConfig , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Tuple ) -> Dict:
super().__init__(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCAmelCase = config.num_labels
__lowerCAmelCase = TFRegNetMainLayer(lowerCAmelCase_ , name='regnet' )
# classification head
__lowerCAmelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : tf.Tensor = None , lowerCAmelCase_ : tf.Tensor = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : str=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.regnet(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase = self.classifier[0](lowerCAmelCase_ )
__lowerCAmelCase = self.classifier[1](lowerCAmelCase_ )
__lowerCAmelCase = None if labels is None else self.hf_compute_loss(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
if not return_dict:
__lowerCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
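# ---------------------------------------------------------------------------
# Usage sketch for the classification model assembled above. It assumes
# RegNetConfig is importable as in the upstream module header (not shown in
# this excerpt); treat the snippet as an illustration, not a verified recipe.
if __name__ == "__main__":
    import tensorflow as tf
    from transformers import RegNetConfig

    config = RegNetConfig(num_labels=10)
    model = TFRegNetForImageClassification(config)
    # Inputs are NCHW (batch, channels, height, width); the embedder transposes
    # to NHWC internally because Conv2D on CPU only supports channels-last.
    pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
    outputs = model(pixel_values)
    print(outputs.logits.shape)  # (1, 10)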
| 53 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
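# ---------------------------------------------------------------------------
# Minimal standalone sketch of the lazy-import idea used above (an assumption
# about the mechanism, not the real `_LazyModule` implementation): attribute
# access on a module proxy triggers the actual import on first use.
if __name__ == "__main__":
    import importlib
    import types

    class LazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map each exported symbol to the submodule that defines it
            self._symbol_to_module = {
                sym: mod for mod, syms in import_structure.items() for sym in syms
            }

        def __getattr__(self, name):
            if name not in self._symbol_to_module:
                raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
            module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
            return getattr(module, name)

    # Stand in for a package: resolve "json.decoder" lazily on first access.
    proxy = LazyModule("json", {"decoder": ["JSONDecoder"]})
    print(proxy.JSONDecoder)  # json.decoder is imported only now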
| 51 | 0 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence( lowercase__ ):
    '''simple docstring'''
    lowercase__ = re.sub("<n>" , "" , lowercase__ )  # remove pegasus newline char (re.sub returns a new string)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowercase__ ) )
| 54 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
def __snake_case ( self : Dict ):
super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class lowerCAmelCase__ ( TestCasePlus ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
UpperCAmelCase = 6
UpperCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
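# ---------------------------------------------------------------------------
# Standalone sketch of the sys.argv patching pattern the tests above rely on
# to drive argparse-based training scripts (names here are illustrative):
if __name__ == "__main__":
    import argparse
    import sys
    from unittest.mock import patch

    def cli_main():
        parser = argparse.ArgumentParser()
        parser.add_argument("--learning_rate", type=float)
        return parser.parse_args()

    with patch.object(sys, "argv", ["finetune.py", "--learning_rate", "3e-4"]):
        args = cli_main()
    assert args.learning_rate == 3e-4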
| 51 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ):
__A = {}
def UpperCamelCase_ ( self : List[Any] ,A : int ,A : str ,A : Optional[int]=1 ):
if self.graph.get(A ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__A = [[w, v]]
if not self.graph.get(A ):
__A = []
def UpperCamelCase_ ( self : Any ):
return list(self.graph )
def UpperCamelCase_ ( self : Optional[Any] ,A : Union[str, Any] ,A : Dict ):
if self.graph.get(A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A )
def UpperCamelCase_ ( self : List[Any] ,A : str=-2 ,A : List[str]=-1 ):
if s == d:
return []
__A = []
__A = []
if s == -2:
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return visited
def UpperCamelCase_ ( self : str ,A : Union[str, Any]=-1 ):
if c == -1:
__A = floor(random() * 1_00_00 ) + 10
for i in range(A ):
            # every vertex has a random number of edges (at most 102)
for _ in range(floor(random() * 1_02 ) + 1 ):
__A = floor(random() * c ) + 1
if n != i:
self.add_pair(A ,A ,1 )
def UpperCamelCase_ ( self : int ,A : str=-2 ):
__A = deque()
__A = []
if s == -2:
__A = list(self.graph )[0]
d.append(A )
visited.append(A )
while d:
__A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self : Tuple ,A : int ):
__A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase_ ( self : Any ,A : List[str] ):
return len(self.graph[u] )
def UpperCamelCase_ ( self : Optional[int] ,A : Any=-2 ):
__A = []
__A = []
if s == -2:
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = s
__A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return sorted_nodes
def UpperCamelCase_ ( self : List[Any] ):
__A = []
__A = []
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = -2
__A = []
__A = s
__A = False
__A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__A = len(A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__A = True
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = False
indirect_parents.append(A )
__A = s
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return list(A )
def UpperCamelCase_ ( self : str ):
__A = []
__A = []
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = -2
__A = []
__A = s
__A = False
__A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__A = len(A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__A = True
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = False
indirect_parents.append(A )
__A = s
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return False
def UpperCamelCase_ ( self : Tuple ,A : Dict=-2 ,A : Any=-1 ):
__A = time()
self.dfs(A ,A )
__A = time()
return end - begin
def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any]=-2 ):
__A = time()
self.bfs(A )
__A = time()
return end - begin
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
__A = {}
def UpperCamelCase_ ( self : Optional[Any] ,A : Tuple ,A : Union[str, Any] ,A : Union[str, Any]=1 ):
# check if the u exists
if self.graph.get(A ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__A = [[w, v]]
# add the other way
if self.graph.get(A ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
__A = [[w, u]]
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : Optional[Any] ):
if self.graph.get(A ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A )
# the other way round
if self.graph.get(A ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[Any]=-2 ,A : int=-1 ):
if s == d:
return []
__A = []
__A = []
if s == -2:
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return visited
def UpperCamelCase_ ( self : Optional[Any] ,A : Dict=-1 ):
if c == -1:
__A = floor(random() * 1_00_00 ) + 10
for i in range(A ):
            # every vertex has a random number of edges (at most 102)
for _ in range(floor(random() * 1_02 ) + 1 ):
__A = floor(random() * c ) + 1
if n != i:
self.add_pair(A ,A ,1 )
def UpperCamelCase_ ( self : List[str] ,A : Union[str, Any]=-2 ):
__A = deque()
__A = []
if s == -2:
__A = list(self.graph )[0]
d.append(A )
visited.append(A )
while d:
__A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase_ ( self : int ,A : Optional[Any] ):
return len(self.graph[u] )
def UpperCamelCase_ ( self : str ):
__A = []
__A = []
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = -2
__A = []
__A = s
__A = False
__A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__A = len(A ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__A = True
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = False
indirect_parents.append(A )
__A = s
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return list(A )
def UpperCamelCase_ ( self : Any ):
__A = []
__A = []
__A = list(self.graph )[0]
stack.append(A )
visited.append(A )
__A = -2
__A = []
__A = s
__A = False
__A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__A = len(A ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__A = True
if len(A ) != 0:
__A = stack[len(A ) - 1]
else:
__A = False
indirect_parents.append(A )
__A = s
__A = ss
            # check if we have reached the starting point
if len(A ) == 0:
return False
def UpperCamelCase_ ( self : Union[str, Any] ):
return list(self.graph )
def UpperCamelCase_ ( self : Dict ,A : Any=-2 ,A : int=-1 ):
__A = time()
self.dfs(A ,A )
__A = time()
return end - begin
def UpperCamelCase_ ( self : str ,A : List[str]=-2 ):
__A = time()
self.bfs(A )
__A = time()
return end - begin
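# ---------------------------------------------------------------------------
# Standalone illustration of the two traversals implemented above, on the
# same adjacency shape this class stores ({node: [[weight, neighbor], ...]}).
if __name__ == "__main__":
    graph = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}

    def dfs(start):
        stack, seen, order = [start], {start}, []
        while stack:
            node = stack.pop()
            order.append(node)
            for _weight, nxt in reversed(graph[node]):
                if nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return order

    def bfs(start):
        queue, order = deque([start]), [start]
        while queue:
            node = queue.popleft()
            for _weight, nxt in graph[node]:
                if nxt not in order:
                    order.append(nxt)
                    queue.append(nxt)
        return order

    print(dfs(0))  # [0, 1, 3, 2]
    print(bfs(0))  # [0, 1, 2, 3]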
| 55 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( ProcessorMixin ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__( self : Union[str, Any] , image_processor : List[str]=None , tokenizer : Union[str, Any]=None , **kwargs : Optional[Any] ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self : Any , text : Any=None , images : str=None , return_tensors : List[Any]=None , **kwargs : List[str] ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self : Optional[Any] , *args : int , **kwargs : List[Any] ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : Any , *args : Union[str, Any] , **kwargs : Any ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Union[str, Any] ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
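# ---------------------------------------------------------------------------
# Usage sketch for the processor above. The checkpoint name is an assumption
# (this CLIP-image-processor/XLM-R-tokenizer pairing matches AltCLIP-style
# models); treat the snippet as illustrative, not verified:
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")  # assumed checkpoint
    image = Image.new("RGB", (224, 224))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']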
| 51 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_a : str = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = "owlvit_text_model"
    def __init__( self , vocab_size=4_9408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=4_9406 , eos_token_id=4_9407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = "owlvit_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('model_type' ) == "owlvit":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    model_type = "owlvit"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6_5_9_2 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
@classmethod
    def from_text_vision_configs( cls , text_config : Dict , vision_config : Dict , **kwargs ) -> Any:
        config_dict = {}
        config_dict['text_config'] = text_config
        config_dict['vision_config'] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ) -> str:
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
    def inputs( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
    def outputs( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
    def atol_for_validation( self : Optional[int] ) -> float:
return 1e-4
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "ProcessorMixin" , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
__snake_case = super().generate_dummy_inputs(
processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
__snake_case = super().generate_dummy_inputs(
processor.image_processor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ )
return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self : Tuple ) -> int:
return 14
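# ---------------------------------------------------------------------------
# Sketch of composing the configs defined above. `from_text_vision_configs`
# expects plain dicts (the __init__ re-instantiates the sub-configs), hence
# the `.to_dict()` calls:
if __name__ == "__main__":
    text_config = OwlViTTextConfig(num_hidden_layers=6)
    vision_config = OwlViTVisionConfig(patch_size=16)
    config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    print(config.projection_dim)  # 512 (default)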
| 56 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
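# ---------------------------------------------------------------------------
# Loading sketch for the re-exported pipeline (checkpoint name taken from the
# UnCLIP/Karlo docs; treat the snippet as an unverified assumption about your
# environment, it needs the weights and a GPU):
if __name__ == "__main__":
    import torch
    from diffusers import UnCLIPPipeline

    pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = pipe("a red apple on a wooden table").images[0]
    image.save("apple.png")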
| 51 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""
    def __init__( self ):
        self.initialized = False
    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ):
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval( self ):
        self.retriever.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever( RagRetriever ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
if index is not None and index.is_initialized() and len(_lowerCamelCase ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
_lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , index=_lowerCamelCase , init_retrieval=_lowerCamelCase , )
UpperCamelCase_: List[str] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for worker in self.retrieval_workers
] )
def _a ( self ):
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
UpperCamelCase_: Union[str, Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
UpperCamelCase_ ,UpperCamelCase_: str = ray.get(random_worker.retrieve.remote(_lowerCamelCase , _lowerCamelCase ) )
else:
UpperCamelCase_ ,UpperCamelCase_: Dict = self._main_retrieve(_lowerCamelCase , _lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_lowerCamelCase )
@classmethod
def _a ( cls , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
return super(_lowerCamelCase , cls ).get_tokenizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
UpperCamelCase_: List[str] = kwargs.pop('config' , _lowerCamelCase ) or RagConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: Optional[int] = RagTokenizer.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
UpperCamelCase_: List[str] = rag_tokenizer.question_encoder
UpperCamelCase_: List[Any] = rag_tokenizer.generator
if indexed_dataset is not None:
UpperCamelCase_: Union[str, Any] = 'custom'
UpperCamelCase_: int = CustomHFIndex(config.retrieval_vector_size , _lowerCamelCase )
else:
UpperCamelCase_: str = cls._build_index(_lowerCamelCase )
return cls(
_lowerCamelCase , question_encoder_tokenizer=_lowerCamelCase , generator_tokenizer=_lowerCamelCase , retrieval_workers=_lowerCamelCase , index=_lowerCamelCase , )
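# ---------------------------------------------------------------------------
# Standalone sketch of the Ray actor pattern used above: wrap a class with
# `ray.remote`, spawn workers, and resolve the returned futures with
# `ray.get`, which is how retrieval calls are fanned out to the workers.
if __name__ == "__main__":
    import ray

    ray.init(num_cpus=2)

    class Counter:  # toy actor standing in for RayRetriever
        def __init__(self):
            self.n = 0

        def bump(self):
            self.n += 1
            return self.n

    RemoteCounter = ray.remote(Counter)
    workers = [RemoteCounter.remote() for _ in range(2)]
    print(ray.get([worker.bump.remote() for worker in workers]))  # [1, 1]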
| 57 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def hashimage( image : Image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask : Image ) -> Dict:
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def __snake_case ( self : Union[str, Any] , model : Optional[int] , tokenizer : Dict , processor : int ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : int , a__ : Dict , a__ : Tuple ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
        image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''' , model=model_id )
        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/diffusers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file( fname : Any , version : Optional[int] , pattern : Dict ):
    '''simple docstring'''
    with open(fname , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.write(code )
def update_version_in_examples( version : List[str] ):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="""examples""" )
def global_version_update( version : List[str] , patch : int=False ):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list( ):
    '''simple docstring'''
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
        index += 1
    with open(README_FILE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
def get_version( ):
    '''simple docstring'''
    with open(REPLACE_FILES["""init"""] , """r""" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch : str=False ):
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(F'Updating version to {version}.' )
    global_version_update(version , patch=patch )
def post_release_work( ):
    '''simple docstring'''
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 58 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize accepted video inputs to a batch: a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class lowerCAmelCase__ ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
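# --- Hedged usage sketch (not part of the original file; the processor class name
# above is mangled, so the call below is illustrative only) ---
#   import numpy as np
#   processor = lowerCAmelCase__()   # the video image processor defined above
#   video = [np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor.preprocess(video, return_tensors="np")
#   # batch["pixel_values"] holds the resized, cropped, rescaled, normalized frames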
| 51 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__A = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts nodes and the corresponding xpaths from HTML strings (requires BeautifulSoup)."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            # Subscript is 0 when the tag is an only child; otherwise its 1-based
            # position among same-name siblings.
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
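# --- Hedged usage sketch (not part of the original file) ---
#   feature_extractor = MarkupLMFeatureExtractor()   # needs `beautifulsoup4` installed
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]    # [['Hello world']]
#   encoding["xpaths"]   # [['/html/body/p']]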
| 59 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores each query token as an entity start/end against a support set."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Find scores of each token being start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # BERT encodings of the query batch and the flattened support examples
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        # Boolean masks marking the entity start/end marker tokens in each support example
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
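# --- Hedged usage sketch (not part of the original file) ---
# W_query and W_supports are tokenizer outputs (dicts of tensors); W_supports must also
# carry 'sizes' (support examples per query) and the entity start/end marker token ids:
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)   # per-token start/end probabilities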
| 51 | 0 |
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """Send a message to a Slack channel via an incoming-webhook URL."""
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
    def test_full_loop_device(self):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
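# --- Hedged usage sketch (not part of the original file): the denoising loop the tests
# above exercise, written out directly; `model` and `noise` are placeholders ---
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001,
#                                      beta_end=0.02, beta_schedule="linear")
#   scheduler.set_timesteps(10)
#   sample = noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample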
| 51 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
"""simple docstring"""
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 61 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """Declare a dataclass field holding a list with the given default value."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
'''simple docstring'''
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string_value):
    """Return True if the string parses as an int."""
    try:
        int(string_value)
        return True
    except ValueError:
        return False
def can_convert_to_float(string_value):
    """Return True if the string parses as a float."""
    try:
        float(string_value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
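# --- Hedged input sketch (not part of the original file) ---
# Inferred from the DictReader usage above, the CSV needs at least these columns:
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.012
#   bert-base-uncased,8,512,0.045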
| 51 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
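# --- Hedged usage sketch (not part of the original file) ---
#   from PIL import Image
#   processor = BlipProcessor(image_processor, tokenizer)   # components built elsewhere
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#   # inputs: pixel_values plus the tokenized text fields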
| 62 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original YAML config into a flat argparse.Namespace."""
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    # The "Va" class names below mirror this file's imports (they correspond to MobileViTV2).
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, rename_key_src, rename_key_dest):
    """Move a state-dict entry from its source key to its destination key."""
    val = dct.pop(rename_key_src)
    dct[rename_key_dest] = val
def create_rename_keys(state_dict, base_model=False):
    # Build (old_key, new_key) pairs translating original checkpoint names to HF module paths.
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove the auxiliary segmentation head weights, which are unused at inference."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO test image of two cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
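# --- Hedged CLI sketch (not part of the original file; the script filename is hypothetical) ---
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf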
| 51 | 0 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # normality = molarity * n-factor, where molarity = moles / volume
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # ideal gas law PV = nRT with R = 0.0821 L*atm/(mol*K), solved for P
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # ideal gas law solved for V
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, volume: float, moles: float) -> float:
    # ideal gas law solved for T
    return round(float((pressure * volume) / (0.0821 * moles)))
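# --- Hedged worked examples (not part of the original file; values verified by hand) ---
#   molarity_to_normality(2, 3.1, 0.31)              # (3.1 / 0.31) * 2           -> 20
#   moles_to_pressure(0.82, 3, 300)                  # (3 * 0.0821 * 300) / 0.82  -> 90
#   moles_to_volume(0.82, 3, 300)                    # (3 * 0.0821 * 300) / 0.82  -> 90
#   pressure_and_volume_to_temperature(0.82, 1, 2)   # (0.82 * 1) / (0.0821 * 2)  -> 5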
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
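# --- Hedged usage sketch (not part of the original file) ---
#   config = MegatronBertConfig()                                     # all defaults above
#   small = MegatronBertConfig(hidden_size=512, num_hidden_layers=8)  # override a few fields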
| 51 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 64 |
'''simple docstring'''
from __future__ import annotations
a__ : List[str] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
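    # After breath_first_search(), self.parent maps each reachable vertex to its
    # predecessor on a shortest path from the source; shortest_path() walks it back.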
    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
a__ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 51 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
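# --- Hedged usage sketch (not part of the original file), mirroring the slow test above ---
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#   outputs = model(np.ones((1, 1), dtype="i4"))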
| 65 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
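# A minimal sketch of what the lazy pattern buys (hypothetical usage):
#
#   from transformers import Wav2Vec2ProcessorWithLM
#
# The heavy submodule is only imported when the attribute is first accessed,
# which keeps the top-level `import transformers` cheap.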
| 51 | 0 |
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
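# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) returns 27.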
| 66 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"""bert/{name}"""
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir)
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
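# Example invocation (hypothetical paths, shown for illustration only):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt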
if __name__ == "__main__":
main()
| 67 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """simple docstring"""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
UpperCAmelCase = 32
UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        UpperCAmelCase = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
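        # Note: 7 * 10**9 bytes is roughly 6.5 GiB, so the assertion above is a
        # coarse regression guard for sequential CPU offload rather than an
        # exact memory measurement.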
| 51 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self : str , a_ : str , a_ : Optional[int] , a_ : Optional[Any] , a_ : List[str] , a_ : int=1 , a_ : str=False , **a_ : Optional[int] ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = vocab_size
__snake_case = d_embed
__snake_case = d_proj
__snake_case = cutoffs + [vocab_size]
__snake_case = [0] + self.cutoffs
__snake_case = div_val
__snake_case = self.cutoffs[0]
__snake_case = len(self.cutoffs ) - 1
__snake_case = self.shortlist_size + self.n_clusters
__snake_case = keep_order
__snake_case = []
__snake_case = []
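    # Background (informal): adaptive softmax keeps frequent tokens in a
    # "head" shortlist and groups the rest into `n_clusters` tail clusters.
    # A tail token's log-probability factorizes as
    #   log p(token) = log p(cluster | h) + log p(token | cluster, h),
    # which is exactly the `head_logprob[..., cluster_prob_idx, None] +
    # tail_logprob` sum computed in the forward pass below.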
def A ( self : int , a_ : List[str] ):
"""simple docstring"""
if self.n_clusters > 0:
__snake_case = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=a_ , name="cluster_weight" )
__snake_case = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=a_ , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__snake_case = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=a_ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(a_ )
else:
self.out_projs.append(a_ )
__snake_case = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=a_ , name=f'''out_layers_._{i}_._weight''' , )
__snake_case = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=a_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__snake_case , __snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__snake_case = self.d_embed // (self.div_val**i)
__snake_case = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=a_ , name=f'''out_projs_._{i}''' )
self.out_projs.append(a_ )
__snake_case = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=a_ , name=f'''out_layers_._{i}_._weight''' , )
__snake_case = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=a_ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(a_ )
@staticmethod
def A ( a_ : str , a_ : Tuple , a_ : Tuple , a_ : List[str]=None ):
"""simple docstring"""
__snake_case = x
if proj is not None:
__snake_case = tf.einsum("ibd,ed->ibe" , a_ , a_ )
return tf.einsum("ibd,nd->ibn" , a_ , a_ ) + b
@staticmethod
def A ( a_ : int , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = shape_list(a_ )
__snake_case = tf.range(lp_size[0] , dtype=target.dtype )
__snake_case = tf.stack([r, target] , 1 )
return tf.gather_nd(a_ , a_ )
def A ( self : Optional[int] , a_ : Dict , a_ : Dict , a_ : Tuple=True , a_ : Optional[Any]=False ):
"""simple docstring"""
__snake_case = 0
if self.n_clusters == 0:
__snake_case = self._logit(a_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__snake_case = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a_ , logits=a_ )
__snake_case = tf.nn.log_softmax(a_ , axis=-1 )
else:
__snake_case = shape_list(a_ )
__snake_case = []
__snake_case = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__snake_case , __snake_case = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__snake_case = (target >= l_idx) & (target < r_idx)
__snake_case = tf.where(a_ )
__snake_case = tf.boolean_mask(a_ , a_ ) - l_idx
if self.div_val == 1:
__snake_case = self.out_layers[0][0][l_idx:r_idx]
__snake_case = self.out_layers[0][1][l_idx:r_idx]
else:
__snake_case = self.out_layers[i][0]
__snake_case = self.out_layers[i][1]
if i == 0:
__snake_case = tf.concat([cur_W, self.cluster_weight] , 0 )
__snake_case = tf.concat([cur_b, self.cluster_bias] , 0 )
__snake_case = self._logit(a_ , a_ , a_ , self.out_projs[0] )
__snake_case = tf.nn.log_softmax(a_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__snake_case = tf.boolean_mask(a_ , a_ )
__snake_case = self._gather_logprob(a_ , a_ )
else:
__snake_case = self._logit(a_ , a_ , a_ , self.out_projs[i] )
__snake_case = tf.nn.log_softmax(a_ )
__snake_case = self.cutoffs[0] + i - 1 # No probability for the head cluster
__snake_case = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(a_ )
if target is not None:
__snake_case = tf.boolean_mask(a_ , a_ )
__snake_case = tf.boolean_mask(a_ , a_ )
__snake_case = self._gather_logprob(a_ , a_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(a_ , -cur_logprob , shape_list(a_ ) )
__snake_case = tf.concat(a_ , axis=-1 )
if target is not None:
if return_mean:
__snake_case = tf.reduce_mean(a_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(a_ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(a_ , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 69 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
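# Minimal usage sketch (assuming a simple singly linked node type):
#
#   class Node:
#       def __init__(self, val, nxt=None):
#           self.val, self.next = val, nxt
#
#   assert is_palindrome(Node(1, Node(2, Node(1))))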
| 51 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
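# The assignments below re-expose the current public objects on the deprecated
# module aliases imported above, so legacy import paths keep working until
# those modules are removed.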
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 70 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
| 51 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _snake_case :
@property
def UpperCamelCase__ ( self ):
return self.get_dummy_input()
@property
def UpperCamelCase__ ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
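    # The shapes above follow the 32x32 dummy input: "down" blocks halve the
    # spatial dims, "mid" blocks preserve them, and "up" blocks double them.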
def UpperCamelCase__ ( self ,_snake_case=True ,_snake_case=False ,_snake_case=False ,_snake_case=False ,):
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Any = 32
UpperCAmelCase_ : Tuple = (32, 32)
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : str = torch.device(_snake_case )
UpperCAmelCase_ : Optional[Any] = (batch_size, num_channels) + sizes
UpperCAmelCase_ : Tuple = randn_tensor(_snake_case ,generator=_snake_case ,device=_snake_case )
UpperCAmelCase_ : Union[str, Any] = {"hidden_states": hidden_states}
if include_temb:
UpperCAmelCase_ : Optional[Any] = 1_28
UpperCAmelCase_ : Any = randn_tensor((batch_size, temb_channels) ,generator=_snake_case ,device=_snake_case )
if include_res_hidden_states_tuple:
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(1 )
UpperCAmelCase_ : List[str] = (randn_tensor(_snake_case ,generator=_snake_case ,device=_snake_case ),)
if include_encoder_hidden_states:
UpperCAmelCase_ : int = floats_tensor((batch_size, 32, 32) ).to(_snake_case )
if include_skip_sample:
UpperCAmelCase_ : str = randn_tensor(((batch_size, 3) + sizes) ,generator=_snake_case ,device=_snake_case )
return dummy_input
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 1_28,
}
if self.block_type == "up":
UpperCAmelCase_ : List[str] = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
UpperCAmelCase_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Tuple = self.block_class(**_snake_case )
unet_block.to(_snake_case )
unet_block.eval()
with torch.no_grad():
UpperCAmelCase_ : Any = unet_block(**_snake_case )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : List[Any] = output[0]
self.assertEqual(output.shape ,self.output_shape )
UpperCAmelCase_ : Union[str, Any] = output[0, -1, -3:, -3:]
UpperCAmelCase_ : Tuple = torch.tensor(_snake_case ).to(_snake_case )
assert torch_all_close(output_slice.flatten() ,_snake_case ,atol=5E-3 )
@unittest.skipIf(torch_device == "mps" ,"Training is not supported in mps" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Tuple = self.block_class(**_snake_case )
model.to(_snake_case )
model.train()
UpperCAmelCase_ : List[Any] = model(**_snake_case )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = output[0]
UpperCAmelCase_ : int = torch.device(_snake_case )
UpperCAmelCase_ : List[str] = randn_tensor(output.shape ,device=_snake_case )
UpperCAmelCase_ : str = torch.nn.functional.mse_loss(_snake_case ,_snake_case )
loss.backward()
| 71 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
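    # `fairseq_offset` shifts raw SentencePiece ids to leave room for the
    # special tokens a fairseq-style vocabulary prepends, which is why it is
    # added to every expected id above.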
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 0 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *snake_case_ , **snake_case_ ):
super().__init__(*snake_case_ , **snake_case_ )
self.check_model_type(snake_case_ )
def _A( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ):
lowercase , lowercase ={}, {}
if padding is not None:
lowercase =padding
if truncation is not None:
lowercase =truncation
if top_k is not None:
lowercase =top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case_ , snake_case_ = None , **snake_case_ ):
if isinstance(snake_case_ , (Image.Image, str) ) and isinstance(snake_case_ , snake_case_ ):
lowercase ={'''image''': image, '''question''': question}
else:
lowercase =image
lowercase =super().__call__(snake_case_ , **snake_case_ )
return results
def _A( self , snake_case_ , snake_case_=False , snake_case_=False ):
lowercase =load_image(inputs['''image'''] )
lowercase =self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=snake_case_ , truncation=snake_case_ )
lowercase =self.image_processor(images=snake_case_ , return_tensors=self.framework )
model_inputs.update(snake_case_ )
return model_inputs
def _A( self , snake_case_ ):
lowercase =self.model(**snake_case_ )
return model_outputs
def _A( self , snake_case_ , snake_case_=5 ):
if top_k > self.model.config.num_labels:
lowercase =self.model.config.num_labels
if self.framework == "pt":
lowercase =model_outputs.logits.sigmoid()[0]
lowercase , lowercase =probs.topk(snake_case_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase =scores.tolist()
lowercase =ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 72 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config( yolos_name : str ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v( state_dict : dict , config : YolosConfig , base_model : bool = False ) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
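# Note: timm checkpoints store query/key/value as one fused
# (3 * hidden_size, hidden_size) projection; the slicing above splits it back
# into equal query, key and value blocks, in that order.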
def rename_key( name : str ) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        name = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        name = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        name = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        name = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        name = name.replace('''vit.norm''' , '''vit.layernorm''' )
    return name
def convert_state_dict( orig_state_dict : dict , model : YolosForObjectDetection ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ) -> None:
    """simple docstring"""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session')
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string')),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
'answers': datasets.Sequence(
{
'text': datasets.Value('string'),
'answer_start': datasets.Value('int32'),
}),
'id': datasets.Value('int64'),
})
SCREAMING_SNAKE_CASE = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(_UpperCAmelCase)),
} , features=_UpperCAmelCase , )
return dataset
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'file.arrow')
dataset.map(cache_file_name=_UpperCAmelCase)
return filename
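# All fixtures below are session-scoped: each file or archive is materialized
# once per test session and reused, which keeps repeated I/O setup cheap.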
# FILE_CONTENT + files
a_ : Tuple = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt'
SCREAMING_SNAKE_CASE = FILE_CONTENT
with open(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase)
return filename
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
    import bz2
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.bz2'
SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8')
    with bz2.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
import gzip
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'file.txt.gz')
SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8')
with gzip.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.lz4'
SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8')
    with lz4.frame.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.7z'
    with py7zr.SevenZipFile(_UpperCAmelCase , 'w') as archive:
archive.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
import tarfile
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.tar'
with tarfile.TarFile(_UpperCAmelCase , 'w') as f:
f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
import lzma
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.xz'
SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8')
with lzma.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
import zipfile
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.txt.zst'
SCREAMING_SNAKE_CASE = bytes(_UpperCAmelCase , 'utf-8')
with zstd.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'file.xml'
SCREAMING_SNAKE_CASE = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>')
with open(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase)
return filename
a_ : Tuple = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
a_ : Any = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
a_ : List[Any] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
a_ : Dict = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
a_ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session')
def lowerCamelCase__ ():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = datasets.Dataset.from_dict(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.arrow')
dataset.map(cache_file_name=_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.sqlite')
    with contextlib.closing(sqlite3.connect(_UpperCAmelCase)) as con:
SCREAMING_SNAKE_CASE = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)')
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values()))
con.commit()
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.csv')
with open(_UpperCAmelCase , 'w' , newline='') as f:
SCREAMING_SNAKE_CASE = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'])
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset2.csv')
with open(_UpperCAmelCase , 'w' , newline='') as f:
SCREAMING_SNAKE_CASE = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'])
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
    import bz2
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.bz2'
with open(_UpperCAmelCase , 'rb') as f:
SCREAMING_SNAKE_CASE = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_UpperCAmelCase , 'wb') as f:
f.write(_UpperCAmelCase)
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase))
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV')))
f.write(_UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV')))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data') / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w') as f:
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase)))
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase)))
return path
@pytest.fixture(scope='session')
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(tmp_path_factory.mktemp('data') / 'dataset.parquet')
SCREAMING_SNAKE_CASE = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
})
with open(_UpperCAmelCase , 'wb') as f:
SCREAMING_SNAKE_CASE = pq.ParquetWriter(_UpperCAmelCase , schema=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_UpperCAmelCase))] for k in DATA[0]} , schema=_UpperCAmelCase)
writer.write_table(_UpperCAmelCase)
writer.close()
return path
@pytest.fixture(scope='session')
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA}
    with open(path , 'w') as f:
        json.dump(data , f)
    return path
@pytest.fixture(scope='session')
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.json')
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w') as f:
        json.dump(data , f)
    return path
@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path , 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonla_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path , 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path , 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path , 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def text_gz_path(tmp_path_factory , text_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path , 'rb') as orig_file:
        with gzip.open(path , 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory , jsonl_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path , 'rb') as orig_file:
        with gzip.open(path , 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def zip_jsonl_path(tmp_path_factory , jsonl_path , jsonla_path):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path))
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path))
    return path
@pytest.fixture(scope='session')
def zip_nested_jsonl_path(tmp_path_factory , zip_jsonl_path , jsonl_path , jsonla_path):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(zip_jsonl_path , arcname=os.path.join('nested' , os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(tmp_path_factory , jsonl_path , jsonla_path):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(jsonl_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl_path)))
        f.write(jsonla_path , arcname=os.path.join('main_dir' , os.path.basename(jsonla_path)))
    return path
@pytest.fixture(scope='session')
def tar_jsonl_path(tmp_path_factory , jsonl_path , jsonla_path):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w') as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path))
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path))
    return path
@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tmp_path_factory , tar_jsonl_path , jsonl_path , jsonla_path):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path , 'w') as f:
        f.add(tar_jsonl_path , arcname=os.path.join('nested' , os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path , 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def texta_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path , 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def abc_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path , 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def zip_text_path(tmp_path_factory , text_path , texta_path):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(text_path , arcname=os.path.basename(text_path))
        f.write(texta_path , arcname=os.path.basename(texta_path))
    return path
@pytest.fixture(scope='session')
def zip_text_with_dir_path(tmp_path_factory , text_path , texta_path):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(text_path , arcname=os.path.join('main_dir' , os.path.basename(text_path)))
        f.write(texta_path , arcname=os.path.join('main_dir' , os.path.basename(texta_path)))
    return path
@pytest.fixture(scope='session')
def zip_unsupported_ext_path(tmp_path_factory , text_path , texta_path):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(text_path , arcname=os.path.basename('unsupported.ext'))
        f.write(texta_path , arcname=os.path.basename('unsupported_2.ext'))
    return path
@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path , 'w' , encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg')
@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav')
@pytest.fixture(scope='session')
def zip_image_path(tmp_path_factory , image_file):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w') as f:
        f.write(image_file , arcname=os.path.basename(image_file))
        f.write(image_file , arcname=os.path.basename(image_file).replace('.jpg' , '2.jpg'))
    return path
@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt' , 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt' , 'w') as f:
        f.write('bar\n' * 10)
    return data_dir
| 73 |
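The record above is a conftest of session-scoped pytest fixtures that materialize small CSV/JSON/text/archive datasets and return their paths. A minimal sketch of how such a fixture is consumed; the test name is illustrative, and it assumes the `csv_path` fixture defined above. pytest injects the fixture's return value by matching the parameter name:

import csv

def test_csv_fixture(csv_path):
    # pytest resolves `csv_path` by name and passes in the file path it returned
    with open(csv_path, newline='') as f:
        rows = list(csv.DictReader(f))
    # the fixture writes the columns col_1, col_2 and col_3
    assert rows and set(rows[0]) == {'col_1', 'col_2', 'col_3'}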
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a__ : List[Any] = logging.get_logger(__name__)
a__ : int = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a__ : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a__ : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig( PretrainedConfig ):
'''simple docstring'''
_lowerCamelCase ="whisper"
_lowerCamelCase =["past_key_values"]
_lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs( self ):
UpperCAmelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction='''inputs''' )
return common_inputs
def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ):
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , )
UpperCAmelCase = encoder_inputs['''input_features'''].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , a__ , a__ , a__ , a__ )
UpperCAmelCase = encoder_inputs.pop('''input_features''' )
UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
    def atol_for_validation( self ):
return 1e-3
| 51 | 0 |
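The record above mirrors the Whisper configuration from `transformers`, including the `attribute_map` that aliases `hidden_size` to `d_model`. A minimal sketch of instantiating and round-tripping such a config; it assumes the class is exposed as `transformers.WhisperConfig`, which this record mirrors:

from transformers import WhisperConfig

config = WhisperConfig(d_model=256, encoder_layers=2, decoder_layers=2)
assert config.hidden_size == config.d_model  # resolved through attribute_map
config.save_pretrained('/tmp/whisper-tiny')  # writes /tmp/whisper-tiny/config.json
reloaded = WhisperConfig.from_pretrained('/tmp/whisper-tiny')
assert reloaded.d_model == 256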
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''informer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
def __init__( self : Tuple , _A : Optional[int] = None , _A : Optional[int] = None , _A : str = "student_t" , _A : str = "nll" , _A : int = 1 , _A : List[int] = None , _A : Optional[Union[str, bool]] = "mean" , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : int = 0 , _A : Optional[List[int]] = None , _A : Optional[List[int]] = None , _A : int = 64 , _A : int = 32 , _A : int = 32 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : int = 2 , _A : bool = True , _A : str = "gelu" , _A : float = 0.05 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : float = 0.1 , _A : int = 100 , _A : float = 0.02 , _A : Union[str, Any]=True , _A : str = "prob" , _A : int = 5 , _A : bool = True , **_A : int , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = prediction_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = context_length or prediction_length
__SCREAMING_SNAKE_CASE : Optional[int] = distribution_output
__SCREAMING_SNAKE_CASE : Optional[int] = loss
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_size
__SCREAMING_SNAKE_CASE : Any = num_time_features
__SCREAMING_SNAKE_CASE : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__SCREAMING_SNAKE_CASE : Tuple = scaling
__SCREAMING_SNAKE_CASE : List[Any] = num_dynamic_real_features
__SCREAMING_SNAKE_CASE : str = num_static_real_features
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__SCREAMING_SNAKE_CASE : List[Any] = cardinality
else:
__SCREAMING_SNAKE_CASE : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_A ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__SCREAMING_SNAKE_CASE : Any = embedding_dimension
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE : str = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE : List[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
__SCREAMING_SNAKE_CASE : Optional[int] = d_model
__SCREAMING_SNAKE_CASE : Any = encoder_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads
__SCREAMING_SNAKE_CASE : Optional[Any] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
__SCREAMING_SNAKE_CASE : List[str] = decoder_layers
__SCREAMING_SNAKE_CASE : List[str] = dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
__SCREAMING_SNAKE_CASE : Any = activation_dropout
__SCREAMING_SNAKE_CASE : int = encoder_layerdrop
__SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
__SCREAMING_SNAKE_CASE : Optional[Any] = activation_function
__SCREAMING_SNAKE_CASE : Optional[Any] = init_std
__SCREAMING_SNAKE_CASE : List[str] = use_cache
# Informer
__SCREAMING_SNAKE_CASE : Optional[int] = attention_type
__SCREAMING_SNAKE_CASE : int = sampling_factor
__SCREAMING_SNAKE_CASE : Optional[Any] = distil
super().__init__(is_encoder_decoder=_A , **_A )
@property
    def _number_of_features( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 74 |
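The `_number_of_features` property above is folded into `feature_size`, which sizes the encoder/decoder input projection. A small sketch of that bookkeeping, assuming the upstream `transformers.InformerConfig` that this record mirrors:

from transformers import InformerConfig

cfg = InformerConfig(prediction_length=24, input_size=1)
expected = cfg.input_size * len(cfg.lags_sequence) + (
    sum(cfg.embedding_dimension) + cfg.num_dynamic_real_features
    + cfg.num_time_features + cfg.num_static_real_features + cfg.input_size * 2
)
assert cfg.feature_size == expected  # same formula as in __init__ above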
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
'''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict['''attention_mask'''] )[:, :-1], tf.ones_like(inputs_dict['''attention_mask'''] )[:, -1:]] , axis=-1 , )
        inputs_dict['''global_attention_mask'''] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor( tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
a__ : int = 1e-4
@slow
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
| 51 | 0 |
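The `encoder_seq_length` computation in the tester above pads `seq_length` up to the next multiple of `attention_window`, since LED's local attention requires aligned sequence lengths. The same arithmetic as a standalone, dependency-free check:

def pad_to_window(seq_length: int, attention_window: int) -> int:
    # round seq_length up to the next multiple of attention_window
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert pad_to_window(7, 4) == 8   # padded up
assert pad_to_window(8, 4) == 8   # already aligned, unchanged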
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase__ = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    return [
        int(10_00 * (box[0] / width) ),
        int(10_00 * (box[1] / height) ),
        int(10_00 * (box[2] / width) ),
        int(10_00 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config ):
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class lowerCamelCase_ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : Tuple , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : float = 1 / 255 , _A : bool = True , _A : Union[float, Iterable[float]] = None , _A : Union[float, Iterable[float]] = None , _A : bool = True , _A : Optional[str] = None , _A : Optional[str] = "" , **_A : Tuple , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : Optional[int] = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase__ : Union[str, Any] = get_size_dict(_A )
UpperCAmelCase__ : Tuple = do_resize
UpperCAmelCase__ : Tuple = size
UpperCAmelCase__ : int = resample
UpperCAmelCase__ : Any = do_rescale
UpperCAmelCase__ : Tuple = rescale_value
UpperCAmelCase__ : Optional[Any] = do_normalize
UpperCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCAmelCase__ : Optional[int] = apply_ocr
UpperCAmelCase__ : List[Any] = ocr_lang
UpperCAmelCase__ : Optional[int] = tesseract_config
def lowercase_ ( self : List[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCAmelCase__ : Dict = (size['''height'''], size['''width'''])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[int] , _A : np.ndarray , _A : Union[float, Iterable[float]] , _A : Union[float, Iterable[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : str=None , _A : bool = None , _A : float = None , _A : bool = None , _A : Union[float, Iterable[float]] = None , _A : Union[float, Iterable[float]] = None , _A : bool = None , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Tuple , ):
'''simple docstring'''
UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Any = get_size_dict(_A )
UpperCAmelCase__ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : Dict = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase__ : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase__ : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase__ : Dict = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Tuple = [to_numpy_array(_A ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Optional[Any] = []
for image in images:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = apply_tesseract(_A , _A , _A )
words_batch.append(_A )
boxes_batch.append(_A )
if do_resize:
UpperCAmelCase__ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
UpperCAmelCase__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCAmelCase__ : Tuple = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase__ : Dict = BatchFeature(data={'''pixel_values''': images} , tensor_type=_A )
if apply_ocr:
UpperCAmelCase__ : Optional[int] = words_batch
UpperCAmelCase__ : Any = boxes_batch
return data
| 75 |
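The OCR path above rescales pixel boxes onto the 0-1000 coordinate grid that LayoutLM-style models expect. A standalone check of the `normalize_box` arithmetic (a self-contained copy of the helper above):

def normalize_box(box, width, height):
    # rescale an (x0, y0, x1, y1) pixel box to a 0-1000 grid
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

assert normalize_box([50, 50, 100, 100], width=200, height=200) == [250, 250, 500, 500]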
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
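The `_LazyModule` registration above defers the heavy framework imports until an attribute is first touched. A sketch of the observable behaviour, assuming the real `transformers` package, which uses this exact mechanism:

import importlib

transformers = importlib.import_module('transformers')  # cheap: submodules not imported yet
config_cls = getattr(transformers, 'XLNetConfig')  # first access triggers the real submodule import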
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig( PretrainedConfig ):
UpperCamelCase ="blip_text_model"
def __init__( self , UpperCamelCase_=3_05_24 , UpperCamelCase_=7_68 , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=8 , UpperCamelCase_=5_12 , UpperCamelCase_="gelu" , UpperCamelCase_=1E-12 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3_05_22 , UpperCamelCase_=2 , UpperCamelCase_=0 , UpperCamelCase_=1_02 , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ) -> Dict:
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , sep_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : Dict = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = encoder_hidden_size
__lowercase : Optional[Any] = intermediate_size
__lowercase : str = projection_dim
__lowercase : List[str] = hidden_dropout_prob
__lowercase : Tuple = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Tuple = max_position_embeddings
__lowercase : List[str] = layer_norm_eps
__lowercase : List[str] = hidden_act
__lowercase : List[Any] = initializer_range
__lowercase : str = attention_probs_dropout_prob
__lowercase : Union[str, Any] = is_decoder
__lowercase : Optional[Any] = use_cache
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_ )
__lowercase ,__lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase : Tuple = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class BlipVisionConfig( PretrainedConfig ):
UpperCamelCase ="blip_vision_model"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_84 , UpperCamelCase_=16 , UpperCamelCase_="gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-10 , **UpperCamelCase_ , ) -> int:
super().__init__(**UpperCamelCase_ )
__lowercase : Dict = hidden_size
__lowercase : Tuple = intermediate_size
__lowercase : Any = projection_dim
__lowercase : Tuple = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Union[str, Any] = patch_size
__lowercase : Tuple = image_size
__lowercase : Optional[int] = initializer_range
__lowercase : int = attention_dropout
__lowercase : List[Any] = layer_norm_eps
__lowercase : List[str] = hidden_act
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_ )
__lowercase ,__lowercase : Optional[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
__lowercase : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class BlipConfig( PretrainedConfig ):
UpperCamelCase ="blip"
UpperCamelCase =True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=5_12 , UpperCamelCase_=2.6_5_9_2 , UpperCamelCase_=2_56 , **UpperCamelCase_ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase_ )
if text_config is None:
__lowercase : Tuple = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
__lowercase : Optional[Any] = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
__lowercase : Tuple = BlipTextConfig(**UpperCamelCase_ )
__lowercase : Optional[int] = BlipVisionConfig(**UpperCamelCase_ )
__lowercase : str = self.vision_config.hidden_size
__lowercase : List[str] = projection_dim
__lowercase : Optional[Any] = logit_scale_init_value
__lowercase : Any = 1.0
__lowercase : str = 0.0_2
__lowercase : int = image_text_hidden_size
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 76 |
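`BlipConfig` above composes a text and a vision sub-config and re-nests them in `to_dict()`. A minimal sketch of the round trip, assuming the classes are exposed in `transformers` as the record suggests:

from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(hidden_size=256, num_hidden_layers=2)
vision_cfg = BlipVisionConfig(hidden_size=256, num_hidden_layers=2)
blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
d = blip_cfg.to_dict()  # to_dict() re-nests text_config / vision_config
assert d['text_config']['hidden_size'] == 256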
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a__ : List[Any] = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro( TestCasePlus ):
'''simple docstring'''
    def setUp( self ):
super().setUp()
UpperCAmelCase = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=a__ , )
UpperCAmelCase = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __snake_case ( self : Optional[int] ):
MarianMTModel.from_pretrained(a__ )
@slow
@require_torch_gpu
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
UpperCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase = ['''finetune.py'''] + bash_script.split() + args
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationModule.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
UpperCAmelCase = 6
UpperCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(a__ )
UpperCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0]
UpperCAmelCase = os.path.join(args.output_dir , a__ )
UpperCAmelCase = torch.load(a__ , map_location='''cpu''' )
UpperCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(a__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 51 | 0 |
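The tests above drive a CLI-style `main()` by patching `sys.argv` before argument parsing. A minimal standalone sketch of that pattern; the flag name is illustrative:

import argparse
import sys
from unittest.mock import patch

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir')
    return parser.parse_args()

with patch.object(sys, 'argv', ['finetune.py', '--output_dir', '/tmp/out']):
    args = main()
    assert args.output_dir == '/tmp/out'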
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
A = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True ) -> Optional[Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase : List[str] = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
__UpperCAmelCase : Tuple = config_class.from_json_file(UpperCamelCase )
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : str = True
print(f"Building TensorFlow model from configuration: {config}" )
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase : int = cached_file(
UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCAmelCase : Union[str, Any] = load_pytorch_checkpoint_in_tfa_model(UpperCamelCase , UpperCamelCase )
if compare_with_pt_model:
__UpperCAmelCase : int = tf_model(tf_model.dummy_inputs , training=UpperCamelCase ) # build the network
__UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="cpu" )
__UpperCAmelCase : List[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCamelCase , config=UpperCamelCase , state_dict=UpperCamelCase )
with torch.no_grad():
__UpperCAmelCase : Dict = pt_model(**pt_model.dummy_inputs )
__UpperCAmelCase : Optional[int] = pto[0].numpy()
__UpperCAmelCase : List[str] = tfo[0].numpy()
__UpperCAmelCase : List[Any] = np.amax(np.abs(np_pt - np_tf ) )
print(f"Max absolute difference between models outputs {diff}" )
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(f"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(UpperCamelCase , save_format="h5" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , ) -> Any:
"""simple docstring"""
if args_model_type is None:
__UpperCAmelCase : Dict = list(MODEL_CLASSES.keys() )
else:
__UpperCAmelCase : List[Any] = [args_model_type]
for j, model_type in enumerate(UpperCamelCase , start=1 ):
print("=" * 100 )
print(f" Converting model type {j}/{len(UpperCamelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase : int = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCamelCase , UpperCamelCase ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
__UpperCAmelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(f" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
f" Converting checkpoint {i}/{len(UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase : Dict = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : Optional[Any] = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase : int = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : List[str] = model_shortcut_name
if os.path.isfile(UpperCamelCase ):
__UpperCAmelCase : Tuple = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=UpperCamelCase , pytorch_checkpoint_path=UpperCamelCase , config_file=UpperCamelCase , tf_dump_path=os.path.join(UpperCamelCase , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=UpperCamelCase , )
if remove_cached_files:
os.remove(UpperCamelCase )
os.remove(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
A = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 77 |
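# A hedged usage sketch for the converter defined above; the script name and the paths
# are placeholders, while the flags map directly onto the argparse options declared at
# the bottom of the file.
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-uncased \
#       --config_file bert-base-uncased \
#       --tf_dump_path /tmp/tf_dump \
#       --compare_with_pt_model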
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : Union[str, Any] , a__ : List[str]=None , a__ : Union[str, Any]=None , **a__ : Optional[Any] ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Any , a__ : Any=None , a__ : str=None , a__ : List[Any]=None , **a__ : List[str] ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : Optional[Any] , *a__ : int , **a__ : List[Any] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : Any , *a__ : Union[str, Any] , **a__ : Any ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 51 | 0 |
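# A minimal usage sketch for the CLIP-image-processor / XLM-R-tokenizer pairing above;
# the checkpoint name is an assumption, and the call exercises the text-plus-images
# branch of the ``__call__`` defined in the class.
from PIL import Image
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("BAAI/AltCLIP")  # assumed checkpoint
inputs = processor(
    text=["a photo of a cat"],
    images=Image.new("RGB", (224, 224)),
    return_tensors="pt",
    padding=True,
)
print(sorted(inputs.keys()))  # attention_mask, input_ids and pixel_values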
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: List[str] =get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : str = ReformerTokenizer
a__ : Optional[Any] = ReformerTokenizerFast
a__ : Any = True
a__ : Union[str, Any] = False
a__ : int = True
def _lowercase (self : List[str] ):
super().setUp()
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : int ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__a ) , 1000 )
def _lowercase (self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase (self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] , __a : List[str]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
UpperCAmelCase_ = "This is a simple input"
UpperCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase_ = ("This is a simple input", "This is a pair")
UpperCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def _lowercase (self : Optional[Any] ):
pass
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ReformerTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowercase (self : Optional[int] ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
UpperCAmelCase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def _lowercase (self : Tuple ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = " ".join(__a )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(__a , return_tensors="pt" )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
UpperCAmelCase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ = encoded_sequence["input_ids"].shape
UpperCAmelCase_ = ReformerModel(__a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def _lowercase (self : Dict ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__a , sequences=__a , )
| 78 |
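# A small encode/decode round trip complementing the tests above; it pulls the same
# public checkpoint the slow tests use, so it needs network access and sentencepiece.
from transformers import ReformerTokenizer
tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")
print(ids)            # [126, 32, 262, 152, 38, 72, 287] per the slow test above
print(tok.decode(ids))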
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51 | 0 |
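# A reduced sketch of the optional-dependency guard used above: probe the dependency
# and only import the real pipeline when it is available. The ``None`` fallback is a
# simplification of the generated dummy objects the package actually substitutes.
from diffusers.utils import OptionalDependencyNotAvailable, is_transformers_available
try:
    if not is_transformers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    UnCLIPPipeline = None  # a dummy object that raises on use would go here
else:
    from diffusers import UnCLIPPipeline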
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = MobileBertTokenizer
__lowerCamelCase = MobileBertTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = filter_non_english
__lowerCamelCase = 'google/mobilebert-uncased'
def __UpperCAmelCase ( self ):
super().setUp()
UpperCAmelCase__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase__ : List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = """UNwant\u00E9d,running"""
UpperCAmelCase__ : Union[str, Any] = """unwanted, running"""
return input_text, output_text
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = self.tokenizer_class(self.vocab_file )
UpperCAmelCase__ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_rust_tokenizer()
UpperCAmelCase__ : List[str] = """UNwant\u00E9d,running"""
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase__ : Any = tokenizer.encode(_lowerCAmelCase )
UpperCAmelCase__ : str = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# With lower casing
UpperCAmelCase__ : Tuple = self.get_tokenizer(do_lower_case=_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer(do_lower_case=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = """UNwant\u00E9d,running"""
UpperCAmelCase__ : int = tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase__ : Any = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : List[str] = tokenizer.encode(_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase__ : List[str] = {}
for i, token in enumerate(_lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = i
UpperCAmelCase__ : str = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
UpperCAmelCase__ : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
UpperCAmelCase__ : Optional[Any] = tokenizer_r.encode_plus(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , )
UpperCAmelCase__ : Any = tokenizer_r.do_lower_case if hasattr(_lowerCAmelCase , """do_lower_case""" ) else False
UpperCAmelCase__ : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = ["""的""", """人""", """有"""]
UpperCAmelCase__ : Tuple = """""".join(_lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Any = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase__ : List[str] = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_lowerCAmelCase )
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
| 79 |
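# A self-contained sketch of the greedy longest-match-first WordPiece behaviour the
# tests above exercise, built on the same toy vocabulary.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer
vocab = {token: i for i, token in enumerate(
    ["[UNK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
)}
wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wp.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wp.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']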
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> str:
"""simple docstring"""
UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> Dict:
"""simple docstring"""
UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = npimg.shape
return {"hash": hashimage(SCREAMING_SNAKE_CASE_ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCamelCase =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Dict , a__ : int ):
UpperCAmelCase = MaskGenerationPipeline(model=a__ , image_processor=a__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : int , a__ : Dict , a__ : Tuple ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
UpperCAmelCase = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
UpperCAmelCase = '''facebook/sam-vit-huge'''
UpperCAmelCase = pipeline('''mask-generation''' , model=a__ )
UpperCAmelCase = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase = []
for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
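# A hedged sketch of the mask-hashing trick the tests above use to compare pipeline
# outputs compactly; the input is a synthetic black image, so the digest will not match
# any of the expected hashes in the tests.
import hashlib
import numpy as np
from PIL import Image
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
digest = hashlib.md5(image.tobytes()).hexdigest()[:10]
print({"hash": digest, "shape": np.array(image).shape[:2]})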
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 |
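# A skeletal sketch of the lazy-import pattern above; the submodule layout is a
# placeholder, so attribute access would only succeed if ``configuration_foo``
# actually existed next to this file.
import sys
from typing import TYPE_CHECKING
from transformers.utils import _LazyModule
_import_structure = {"configuration_foo": ["FooConfig"]}  # hypothetical layout
if TYPE_CHECKING:
    from .configuration_foo import FooConfig  # noqa: F401 (hypothetical)
else:
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )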
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
| 51 | 0 |
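# A minimal usage sketch for the video processor above; the concrete class name is an
# assumption (the ``offset`` rescaling matches Vivit's processor), and the input is a
# list of random frames.
import numpy as np
from transformers import VivitImageProcessor
video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
processor = VivitImageProcessor()
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224) with the defaults above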
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Union[str, Any] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "falcon"
__UpperCAmelCase : str = ["past_key_values"]
def __init__( self : Optional[Any] , lowerCamelCase : str=65024 , lowerCamelCase : str=4544 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=71 , lowerCamelCase : Optional[Any]=1E-5 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Any=True , lowerCamelCase : Tuple=0.0 , lowerCamelCase : str=0.0 , lowerCamelCase : str=None , lowerCamelCase : List[str]=False , lowerCamelCase : int=False , lowerCamelCase : str=True , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=False , lowerCamelCase : Optional[int]=11 , lowerCamelCase : Optional[int]=11 , **lowerCamelCase : Dict , ) -> Optional[Any]:
__snake_case : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
__snake_case : Optional[Any] = kwargs.pop("n_embed" , lowerCamelCase )
__snake_case : str = hidden_size if n_embed is None else n_embed
__snake_case : List[str] = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Tuple = layer_norm_epsilon
__snake_case : List[str] = initializer_range
__snake_case : Tuple = use_cache
__snake_case : List[Any] = hidden_dropout
__snake_case : Tuple = attention_dropout
__snake_case : int = bos_token_id
__snake_case : Any = eos_token_id
__snake_case : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads
__snake_case : Optional[Any] = alibi
__snake_case : Any = new_decoder_architecture
__snake_case : List[str] = multi_query # Ignored when new_decoder_architecture is True
__snake_case : int = parallel_attn
__snake_case : Union[str, Any] = bias
super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Dict ) -> List[Any]:
return self.hidden_size // self.num_attention_heads
@property
def __snake_case ( self : Any ) -> List[Any]:
return not self.alibi
| 81 |
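# A quick sketch instantiating the config above with its defaults and reading its two
# derived properties (upstream names ``head_dim`` and ``rotary`` assumed for the
# mangled ``__snake_case`` methods): 4544 // 71 == 64, and rotary is the negation of
# alibi.
from transformers import FalconConfig
cfg = FalconConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 4544 71
print(cfg.head_dim)  # 64
print(cfg.rotary)    # True, since alibi defaults to False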
'''simple docstring'''
import torch
from transformers import AutoModel
class lowerCAmelCase__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , a__ : List[str]="sayef/fsner-bert-base-uncased" ):
super(a__ , self ).__init__()
UpperCAmelCase = AutoModel.from_pretrained(a__ , return_dict=a__ )
UpperCAmelCase = torch.nn.CosineSimilarity(3 , 1e-0_8 )
UpperCAmelCase = torch.nn.Softmax(dim=1 )
def __snake_case ( self : List[Any] , **a__ : Tuple ):
return self.bert(**a__ ).last_hidden_state
def __snake_case ( self : int , a__ : List[str] ):
return token_embeddings.sum(2 , keepdim=a__ )
def __snake_case ( self : str , a__ : str , a__ : str , a__ : int=1 ):
return self.softmax(T * self.cos(a__ , a__ ) )
def __snake_case ( self : Tuple , a__ : Tuple , a__ : str ):
UpperCAmelCase = W_supports['''sizes'''].tolist()
UpperCAmelCase = W_supports['''start_token_id'''].item()
UpperCAmelCase = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCAmelCase = self.BERT(**a__ )
UpperCAmelCase = self.BERT(**a__ )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = W_supports['''input_ids'''] == start_token_id
UpperCAmelCase = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(a__ ):
if i == 0:
UpperCAmelCase = 0
else:
UpperCAmelCase = support_sizes[i - 1]
UpperCAmelCase = S[s : s + size][start_token_masks[s : s + size]]
UpperCAmelCase = S[s : s + size][end_token_masks[s : s + size]]
UpperCAmelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
UpperCAmelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCAmelCase = torch.vstack((p_starts, p_start) )
UpperCAmelCase = torch.vstack((p_ends, p_end) )
else:
UpperCAmelCase = p_start
UpperCAmelCase = p_end
return p_starts, p_ends
| 51 | 0 |
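# A hedged sketch isolating the temperature-scaled cosine/softmax scoring the model
# above applies between query and support embeddings; shapes are illustrative.
import torch
cos = torch.nn.CosineSimilarity(3, 1e-08)
softmax = torch.nn.Softmax(dim=1)
q = torch.randn(2, 1, 5, 32)  # (batch, 1, tokens, hidden)
s = torch.randn(2, 4, 5, 32)  # (batch, supports, tokens, hidden)
scores = softmax(1 * cos(q, s))  # cosine over the hidden dim, softmax over supports
print(scores.shape)  # torch.Size([2, 4, 5])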
"""simple docstring"""
from numpy import exp, pi, sqrt
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
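# A standalone restatement of the density above with a quick numeric sanity check: the
# standard normal peaks at 1/sqrt(2*pi) ≈ 0.3989 and is symmetric about mu.
from numpy import exp, isclose, pi, sqrt
def gaussian(x, mu=0.0, sigma=1.0):
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
assert isclose(gaussian(0.0), 1 / sqrt(2 * pi))
assert isclose(gaussian(1.0), gaussian(-1.0))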
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =(EulerDiscreteScheduler,)
_lowerCamelCase =10
def __snake_case ( self : str , **a__ : Tuple ):
UpperCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**a__ )
return config
def __snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def __snake_case ( self : Optional[int] ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def __snake_case ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def __snake_case ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
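# With `use_karras_sigmas` enabled the scheduler resamples its noise levels
# following Karras et al. (2022), which is why the reference sums in the last
# test differ by an order of magnitude from the default linear schedule.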
| 51 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __snake_case ( _lowercase):
def __init__( self : Tuple , __lowerCAmelCase : str=0.01 , __lowerCAmelCase : Any=1_0_0_0 ):
"""simple docstring"""
_lowerCamelCase : List[str] = p_stop
_lowerCamelCase : List[Any] = max_length
def __iter__( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCamelCase : int = random.random() < self.p_stop
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=False , __lowerCAmelCase : Dict=True ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
for i in range(2 )
]
_lowerCamelCase : List[str] = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is neither a round multiple of batch size nor a multiple of
        # num_processes batches.
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : str = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
_lowerCamelCase : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : int = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is neither a round multiple of batch size nor a multiple of
        # num_processes batches.
_lowerCamelCase : List[Any] = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Any = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Tuple = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=__lowerCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Dict = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
# Check the shards when the dataset is very small.
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
_lowerCamelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_lowerCamelCase : Any = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=False ):
"""simple docstring"""
random.seed(__lowerCAmelCase )
_lowerCamelCase : int = list(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [
IterableDatasetShard(
__lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Union[str, Any] = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__lowerCAmelCase )
iterable_dataset_lists.append(list(__lowerCAmelCase ) )
_lowerCamelCase : Dict = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_lowerCamelCase : Union[str, Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
_lowerCamelCase : List[str] = []
for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
reference += reference
self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 4_2
_lowerCamelCase : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
# Edge case with a very small dataset
_lowerCamelCase : Optional[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=__lowerCAmelCase )
_lowerCamelCase : Dict = SkipBatchSampler(__lowerCAmelCase , 2 )
self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Dict = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_lowerCamelCase : str = skip_first_batches(__lowerCAmelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
Accelerator()
_lowerCamelCase : Any = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__lowerCAmelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
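# Minimal sketch of the sharding exercised above: two shards over one batch
# sampler, each yielding the batches meant for one of two processes.
if __name__ == "__main__":
    _base = BatchSampler(range(10), batch_size=2, drop_last=False)
    _shards = [BatchSamplerShard(_base, 2, rank) for rank in range(2)]
    print([list(shard) for shard in _shards])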
| 83 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ )
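# `list_field` wraps `dataclasses.field` with a `default_factory`, sidestepping
# the mutable-default restriction on the dataclass list attributes used below.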
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =field(
metadata={"help": "The csv file to plot."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
_lowerCamelCase =list_field(
default=UpperCAmelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str:
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a__ : Optional[int] ):
UpperCAmelCase = args
UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
UpperCAmelCase = csv.DictReader(a__ )
for row in reader:
UpperCAmelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
UpperCAmelCase = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
UpperCAmelCase = float(row['''result'''] )
def __snake_case ( self : Dict ):
UpperCAmelCase, UpperCAmelCase = plt.subplots()
UpperCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
UpperCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
UpperCAmelCase = self.result_dict[model_name]['''result''']
((UpperCAmelCase), (UpperCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=a__ , )
else:
UpperCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((UpperCAmelCase), (UpperCAmelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCAmelCase = np.asarray(a__ , a__ )[: len(a__ )]
plt.scatter(
a__ , a__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
plt.plot(a__ , a__ , '''--''' )
title_str += f" {label_model_name} vs."
UpperCAmelCase = title_str[:-4]
UpperCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(a__ )
plt.xlabel(a__ )
plt.ylabel(a__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ )
plot.plot()
if __name__ == "__main__":
main()
| 51 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
UpperCAmelCase = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase = BASE_URL + '''/user'''
# https://github.com/settings/tokens
UpperCAmelCase = os.environ.get('''USER_TOKEN''', '''''')
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json()
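# The Authorization header uses GitHub's classic "token <personal-access-token>"
# scheme; the authenticated /user endpoint then returns the account profile as
# JSON, which the snippet below prints key by key.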
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 84 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ):
UpperCAmelCase = []
for k, v in d.items():
UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file:
try:
UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader )
UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) )
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase = MobileViTVaConfig()
UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = '''pascal-voc-id2label.json'''
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = val
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
"""simple docstring"""
if base_model:
UpperCAmelCase = ''''''
else:
UpperCAmelCase = '''mobilevitv2.'''
UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase = k[8:]
else:
UpperCAmelCase = k
if ".block." in k:
UpperCAmelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase = [0, 1]
elif i == 4:
UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
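# Each (old, new) pair collected above is applied in order by `rename_key`,
# mapping the original checkpoint's flat parameter names onto the nested
# MobileViTV2 module hierarchy before `load_state_dict` is called.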
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE_ )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
else:
UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
    # remove and rename some keys of the original state dict, then load it
UpperCAmelCase = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : int = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
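# `_LazyModule` defers the heavy imports declared above until an attribute is
# first accessed, so importing the package stays cheap even with every
# optional backend registered.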
| 85 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="megatron-bert"
def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ):
super().__init__(pad_token_id=a__ , **a__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
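# With the defaults above the per-head dimension works out to
# hidden_size // num_attention_heads = 1024 // 16 = 64, matching standard
# BERT-large sizing.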
| 51 | 0 |
from PIL import Image
def __snake_case ( __UpperCamelCase : Image ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = (259 * (level + 255)) / (255 * (259 - level))
def contrast(__UpperCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(__UpperCamelCase )
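# For level = 170 the factor above is (259 * 425) / (255 * 89) ~= 4.85: mid-gray
# (c = 128) stays fixed while values on either side are pushed toward 0 or 255.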
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
__a :Tuple = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 86 |
'''simple docstring'''
from __future__ import annotations
a__ : List[str] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , a__ : dict[str, list[str]] , a__ : str ):
UpperCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase = {}
UpperCAmelCase = source_vertex
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = {self.source_vertex}
UpperCAmelCase = None
UpperCAmelCase = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(a__ )
UpperCAmelCase = vertex
queue.append(a__ )
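    # The method below walks the parent map built by the search above from the
    # target back to the source, so the recursion assembles the path
    # source-first before appending each vertex.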
def __snake_case ( self : Any , a__ : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase = self.parent.get(a__ )
if target_vertex_parent is None:
UpperCAmelCase = (
f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(a__ )
return self.shortest_path(a__ ) + f"->{target_vertex}"
if __name__ == "__main__":
a__ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 51 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : int , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[Any]) ->Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[str]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Dict) ->Any:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : int) ->str:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->str:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str) ->str:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : int , **UpperCAmelCase__ : str) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any]) ->Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Dict) ->Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any]) ->str:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[Any]) ->int:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str]) ->int:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : Union[str, Any] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Any) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Tuple) ->int:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : List[str]) ->str:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str) ->Any:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int) ->Tuple:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[int]) ->str:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Dict) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : str , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any]) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
class UpperCamelCase_ ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''flax''']
def __init__( self : List[str] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Tuple) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Any) ->Tuple:
'''simple docstring'''
requires_backends(cls , ['''flax'''])
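# Each placeholder class above raises a descriptive ImportError through
# `requires_backends` when Flax is not installed, so importing the library
# still succeeds and the failure only surfaces at first use.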
| 87 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def _snake_case ( __snake_case : Callable[[int | float], int | float] , __snake_case : int | float , __snake_case : int | float , __snake_case : int = 100 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = x_start
_lowerCamelCase : Optional[int] = fnc(__snake_case )
_lowerCamelCase : Optional[int] = 0.0
for _ in range(__snake_case ):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
_lowerCamelCase : int = (x_end - x_start) / steps + xa
_lowerCamelCase : List[Any] = fnc(__snake_case )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
_lowerCamelCase : List[Any] = xa
_lowerCamelCase : Any = fxa
return area
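# The composite trapezoidal rule above carries an O(1 / steps**2) global error
# for twice-differentiable integrands, which the driver below makes visible by
# refining `steps` tenfold per iteration.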
if __name__ == "__main__":
def _snake_case ( __snake_case : List[Any] ):
"""simple docstring"""
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
UpperCAmelCase = 10
while i <= 10_0000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 88 |
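For reference, the loop above realizes the composite trapezoidal rule; note that the abs() makes it the unsigned area between the curve and the x axis rather than the signed integral:

\[
\int_a^b f(x)\,dx \;\approx\; \sum_{k=1}^{n} \frac{f(x_{k-1}) + f(x_k)}{2}\,\Delta x,
\qquad \Delta x = \frac{b - a}{n}, \quad x_k = a + k\,\Delta x .
\]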
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 89 |
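A hypothetical programmatic use of the consolidate() function above; the checkpoint identifiers are illustrative (the defaults used by the RAG project) and are not re-verified here:

from pathlib import Path

dest = Path('./rag-consolidated')
dest.mkdir(exist_ok=True)
consolidate(
    'rag_sequence',
    'facebook/bart-large',                            # generator
    'facebook/dpr-question_encoder-single-nq-base',   # question encoder
    dest,
)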
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number factorial(num)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input('Enter the Number: ').strip())))
| 51 | 0 |
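A quick sanity check for solution() above: 10! = 3628800, whose digits sum to 3+6+2+8+8+0+0 = 27, and 0! = 1.

assert solution(10) == 27
assert solution(0) == 1  # 0! == 1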
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_00 , lowerCamelCase_=20_00 , lowerCamelCase_=24 , lowerCamelCase_=24 , lowerCamelCase_=0.0 , lowerCamelCase_=1_60_00 , lowerCamelCase_=True , lowerCamelCase_=True , ) -> Any:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = min_seq_length
lowerCAmelCase__ = max_seq_length
lowerCAmelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ = feature_size
lowerCAmelCase__ = num_mel_bins
lowerCAmelCase__ = padding_value
lowerCAmelCase__ = sampling_rate
lowerCAmelCase__ = return_attention_mask
lowerCAmelCase__ = do_normalize
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> List[str]:
def _flatten(lowerCamelCase_ ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
lowerCAmelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = SpeechaTextFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
self.assertTrue(np.all(np.mean(lowerCamelCase_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ , axis=0 ) - 1 ) < 1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase__ = np.asarray(lowerCamelCase_ )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = [np.sum(lowerCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''max_length''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=4 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCAmelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase__ = feature_extractor(
lowerCamelCase_ , padding='''longest''' , max_length=16 , truncation=lowerCamelCase_ , return_tensors='''np''' , return_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = inputs.input_features
lowerCAmelCase__ = inputs.attention_mask
lowerCAmelCase__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
import torch
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = np.random.rand(1_00 , 32 ).astype(np.floataa )
lowerCAmelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase__ = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
from datasets import load_dataset
lowerCAmelCase__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowerCAmelCase__ = ds.sort('''id''' ).select(range(lowerCamelCase_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# fmt: off
lowerCAmelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
lowerCAmelCase__ = self._load_datasamples(1 )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ = feature_extractor(lowerCamelCase_ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowerCamelCase_ , atol=1e-4 ) )
| 90 |
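The tests above repeatedly check that padded utterances are normalized per feature dimension. A minimal numpy sketch of that utterance-level zero-mean/unit-variance normalization (the epsilon and shapes here are illustrative, not the extractor's exact internals):

import numpy as np


def zero_mean_unit_var(features: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    """features: (num_frames, feature_size) log-mel features, normalized per dim."""
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + eps)


frames = np.random.rand(100, 24).astype(np.float32)
normed = zero_mean_unit_var(frames)
assert np.all(np.abs(normed.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normed.var(axis=0) - 1) < 1e-3)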
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        UpperCAmelCase = pipe('''anime turtle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 51 | 0 |
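The integration tests above lean on two diffusers memory-saving switches. A minimal loading sketch that isolates just those calls (this downloads weights and needs a GPU; the model id and dtype mirror the tests but are not re-verified here):

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    'fusing/stable-unclip-2-1-l', torch_dtype=torch.float16
)
pipe.enable_attention_slicing()        # trade speed for lower peak attention memory
pipe.enable_sequential_cpu_offload()   # keep submodules on CPU until they are needed
image = pipe('anime turtle', num_inference_steps=2, output_type='np').images[0]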
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 91 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 51 | 0 |
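The functions above assume nodes exposing `.val` and `.next`. A minimal node type plus a round-trip check (using the non-destructive stack variant, since is_palindrome() splits its input in place):

class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build(values):
    """Builds a singly linked list from an iterable and returns its head."""
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


assert is_palindrome_stack(build([1, 2, 2, 1]))
assert not is_palindrome_stack(build([1, 2, 3]))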
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowercase : Dict =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Tuple =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name='''crop_size''' )
lowercase : Dict =do_resize
lowercase : Tuple =size
lowercase : List[Any] =resample
lowercase : Any =do_center_crop
lowercase : Optional[Any] =crop_size
lowercase : List[Any] =do_rescale
lowercase : List[str] =rescale_factor
lowercase : List[Any] =do_normalize
lowercase : Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Optional[int] =image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : List[Any] =do_convert_rgb
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
lowercase : Any =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase : List[str] =get_resize_output_image_size(UpperCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : Union[str, Any] =do_resize if do_resize is not None else self.do_resize
lowercase : Optional[int] =size if size is not None else self.size
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , param_name='''size''' , default_to_square=UpperCAmelCase__ )
lowercase : int =resample if resample is not None else self.resample
lowercase : Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[Any] =crop_size if crop_size is not None else self.crop_size
lowercase : int =get_size_dict(UpperCAmelCase__ , param_name='''crop_size''' , default_to_square=UpperCAmelCase__ )
lowercase : Dict =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
lowercase : List[str] =image_std if image_std is not None else self.image_std
lowercase : Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Tuple =[convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase : Any =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
lowercase : Dict =[self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
lowercase : Optional[int] =[self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : int =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
lowercase : Any =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 92 |
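Order of operations matters in the preprocess() pipeline above: resize, center-crop, rescale, then normalize. A numpy sketch for a single already-cropped HxWx3 uint8 image, using the mean/std constants as published for CLIP (the real class additionally handles batching, resizing, and channel layouts):

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])


def preprocess(image: np.ndarray) -> np.ndarray:
    # resize/center-crop omitted; assume image is already 224x224x3 uint8
    pixels = image.astype(np.float32) * (1 / 255)           # rescale
    pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # normalize per channel
    return pixels.transpose(2, 0, 1)                        # HWC -> CHW


pixel_values = preprocess(np.zeros((224, 224, 3), dtype=np.uint8))
assert pixel_values.shape == (3, 224, 224)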
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
| 51 | 0 |
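A hypothetical end-to-end use of a processor like the one above, which fans text out to the tokenizer and images out to the image processor; the model id is illustrative and loading it is not verified here:

from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
image = Image.new('RGB', (224, 224))
inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt')
# inputs now carries both input_ids (from the tokenizer) and pixel_values
# (from the image processor), ready for the CLIP model.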
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Only used by LegacyDataset."""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="train" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :List[Any] = Path(__UpperCAmelCase ).joinpath(type_path + '.source' )
lowerCAmelCase__ :int = Path(__UpperCAmelCase ).joinpath(type_path + '.target' )
lowerCAmelCase__ :List[Any] = self.get_char_lens(self.src_file )
lowerCAmelCase__ :Dict = max_source_length
lowerCAmelCase__ :Tuple = max_target_length
assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
lowerCAmelCase__ :Union[str, Any] = tokenizer
lowerCAmelCase__ :Tuple = prefix
if n_obs is not None:
lowerCAmelCase__ :List[str] = self.src_lens[:n_obs]
lowerCAmelCase__ :Tuple = src_lang
lowerCAmelCase__ :int = tgt_lang
def __len__( self ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = index + 1 # linecache starts at 1
lowerCAmelCase__ :Any = self.prefix + linecache.getline(str(self.src_file ) , __UpperCAmelCase ).rstrip('\n' )
lowerCAmelCase__ :Dict = linecache.getline(str(self.tgt_file ) , __UpperCAmelCase ).rstrip('\n' )
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __UpperCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowerCAmelCase__ :str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer
)
lowerCAmelCase__ :int = self.tokenizer.generator if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer
lowerCAmelCase__ :str = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_source_length , 'right' )
lowerCAmelCase__ :Optional[Any] = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_target_length , 'right' )
lowerCAmelCase__ :int = source_inputs['input_ids'].squeeze()
lowerCAmelCase__ :Optional[Any] = target_inputs['input_ids'].squeeze()
lowerCAmelCase__ :List[Any] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case ( __UpperCAmelCase ):
'''simple docstring'''
return [len(__UpperCAmelCase ) for x in Path(__UpperCAmelCase ).open().readlines()]
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = torch.stack([x['input_ids'] for x in batch] )
lowerCAmelCase__ :Optional[Any] = torch.stack([x['attention_mask'] for x in batch] )
lowerCAmelCase__ :Any = torch.stack([x['decoder_input_ids'] for x in batch] )
lowerCAmelCase__ :Any = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __UpperCAmelCase )
else self.tokenizer.pad_token_id
)
lowerCAmelCase__ :Tuple = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __UpperCAmelCase )
else self.tokenizer.pad_token_id
)
lowerCAmelCase__ :List[str] = trim_batch(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = trim_batch(__UpperCAmelCase , __UpperCAmelCase , attention_mask=__UpperCAmelCase )
lowerCAmelCase__ :Dict = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 93 |
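A small torch demo of the trim_batch() helper above: any column that is padding in every row of the batch is dropped before the forward pass.

import torch

pad = 0
input_ids = torch.tensor([[5, 6, pad, pad], [7, pad, pad, pad]])
trimmed = trim_batch(input_ids, pad)
assert trimmed.shape == (2, 2)  # the two all-pad columns are removed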
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(a__ , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __snake_case ( self : Tuple ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
        tokenizer = XGLMTokenizer(a__ , keep_accents=True )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(a__ , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __snake_case ( self : int ):
        text = '''Hello World!'''
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(text ) )
@slow
def __snake_case ( self : List[str] ):
        text = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(text ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def lowercase_ ( __A : dict , *, __A : int , __A : bool ) -> List[str]:
"""simple docstring"""
lowercase : Dict =traverse_util.flatten_dict(variables['''target'''] )
lowercase : int ={'''/'''.join(__A ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase : Dict ='''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , __A )
lowercase : List[str] =collections.OrderedDict()
# Shared embeddings.
lowercase : List[Any] =old['''token_embedder/embedding''']
# Encoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
lowercase : Optional[int] =tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : Tuple =tax_attention_lookup(__A , __A , '''encoder''' , '''attention''' )
lowercase : Dict =layer_norm
lowercase : List[str] =k.T
lowercase : List[Any] =o.T
lowercase : Any =q.T
lowercase : Optional[int] =v.T
# Block i, layer 1 (MLP).
lowercase : int =tax_layer_norm_lookup(__A , __A , '''encoder''' , '''pre_mlp_layer_norm''' )
lowercase , lowercase : str =tax_mlp_lookup(__A , __A , '''encoder''' , __A )
lowercase : List[str] =layer_norm
if split_mlp_wi:
lowercase : str =wi[0].T
lowercase : str =wi[1].T
else:
lowercase : str =wi.T
lowercase : Tuple =wo.T
lowercase : List[str] =old[
'''encoder/relpos_bias/rel_embedding'''
].T
lowercase : List[str] =old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(__A ):
# Block i, layer 0 (Self Attention).
lowercase : Dict =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_self_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : str =tax_attention_lookup(__A , __A , '''decoder''' , '''self_attention''' )
lowercase : int =layer_norm
lowercase : Tuple =k.T
lowercase : Union[str, Any] =o.T
lowercase : Any =q.T
lowercase : str =v.T
# Block i, layer 1 (Cross Attention).
lowercase : Any =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_cross_attention_layer_norm''' )
lowercase , lowercase , lowercase , lowercase : Optional[int] =tax_attention_lookup(__A , __A , '''decoder''' , '''encoder_decoder_attention''' )
lowercase : Tuple =layer_norm
lowercase : Any =k.T
lowercase : Tuple =o.T
lowercase : int =q.T
lowercase : str =v.T
# Block i, layer 2 (MLP).
lowercase : Union[str, Any] =tax_layer_norm_lookup(__A , __A , '''decoder''' , '''pre_mlp_layer_norm''' )
lowercase , lowercase : str =tax_mlp_lookup(__A , __A , '''decoder''' , __A )
lowercase : Dict =layer_norm
if split_mlp_wi:
lowercase : List[Any] =wi[0].T
lowercase : Union[str, Any] =wi[1].T
else:
lowercase : Tuple =wi.T
lowercase : Optional[int] =wo.T
lowercase : Tuple =old['''decoder/decoder_norm/scale''']
lowercase : str =old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase : Optional[Any] =old['''decoder/logits_dense/kernel'''].T
return new
def lowercase_ ( __A : Tuple , __A : bool ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase : List[str] =state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase : Optional[Any] =state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
lowercase : str =state_dict['''shared.weight''']
return state_dict
def lowercase_ ( __A : List[Any] , __A : List[str] , __A : Dict , __A : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] =checkpoints.load_tax_checkpoint(__A )
lowercase : Dict =convert_tax_to_pytorch(__A , num_layers=config.num_layers , is_encoder_only=__A )
lowercase : Dict =make_state_dict(__A , __A )
model.load_state_dict(__A , strict=__A )
def lowercase_ ( __A : Dict , __A : Union[str, Any] , __A : Any , __A : bool = False ) -> List[str]:
"""simple docstring"""
lowercase : Tuple =TaConfig.from_json_file(__A )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase : Union[str, Any] =TaEncoderModel(__A )
else:
lowercase : int =TaForConditionalGeneration(__A )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__A , __A , __A , __A )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__A )
# Verify that we can load the checkpoint.
model.from_pretrained(__A )
print('''Done''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
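    # Usage sketch (illustrative only; all paths below are hypothetical):
    #
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
    #       --config_file /path/to/t5_config.json \
    #       --pytorch_dump_path /path/to/output_dir
    #
    # The resulting directory can then be loaded with `from_pretrained`.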
| 94 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
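# A minimal, self-contained illustration of the qkv split performed above, using toy
# tensor sizes rather than real YOLOS weights (this helper is ours, not part of the
# original conversion script):
def _demo_qkv_split(hidden_size: int = 4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    # each projection recovers a (hidden_size, hidden_size) matrix
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value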
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
return name
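# Worked example of rename_key (key chosen for illustration, not taken from a real checkpoint):
#
#   rename_key("blocks.0.attn.proj.weight")
#   == "encoder.layer.0.attention.output.dense.weight"
#
# ("blocks" -> "encoder.layer", then "attn.proj" -> "attention.output.dense".)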
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> "Image.Image":
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Converts the original YOLOS checkpoint into the 🤗 YOLOS structure and verifies the outputs."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
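    # Usage sketch (illustrative; the checkpoint path is hypothetical):
    #
    #   python convert_yolos_to_pytorch.py \
    #       --yolos_name yolos_s_200_pre \
    #       --checkpoint_path /path/to/yolos_s_200_pre.pth \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --push_to_hub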
| 51 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
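# Minimal sketch of how the pipeline under test is typically used (illustrative;
# assumes an environment with the vision and tesseract extras installed):
#
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)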
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # No text is detected in this image, so layoutlmv2 should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
| 95 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    """Configuration class storing the settings of a Whisper model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
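# A short sketch of how this config is typically instantiated (illustrative only;
# the overridden values are arbitrary examples, not recommended settings):
#
#   config = WhisperConfig()                                        # small-variant defaults
#   custom = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)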
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ]
        )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')

        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')

        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 51 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class FlaxUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]

        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
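# Minimal usage sketch for one of the blocks above (illustrative; shapes are toy values
# and assume the sibling resnet/downsample modules behave like their diffusers counterparts):
#
#   import jax
#   block = FlaxDownBlockaD(in_channels=32, out_channels=32)
#   hidden = jnp.zeros((1, 8, 8, 32))     # NHWC layout, as used by the Flax UNet blocks
#   temb = jnp.zeros((1, 128))            # toy time-embedding width
#   params = block.init(jax.random.PRNGKey(0), hidden, temb)
#   out, skips = block.apply(params, hidden, temb)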
| 96 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
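# Illustrative call (toy ids; mirrors how the integration tests below build their inputs):
#
#   inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
#   outputs = TFLEDModel(config)(**inputs)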
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.')
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 51 | 0 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """
    Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError('''Inductance cannot be 0 or negative''')
    elif capacitance <= 0:
        raise ValueError('''Capacitance cannot be 0 or negative''')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
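# Worked example (values chosen purely for illustration):
# for L = 10 H and C = 5 F, f = 1 / (2 * pi * sqrt(50)) ~= 0.0225 Hz, so
# resonant_frequency(10, 5) returns ("Resonant frequency", 0.0225...).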
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Merge the given files so that the total merge (computation) cost is minimal."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
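# Worked example: for files = [2, 3, 4], the two smallest (2, 3) merge first at cost 5,
# then (4, 5) merge at cost 9, giving a total optimal merge cost of 14:
#
#   assert optimal_merge_pattern([2, 3, 4]) == 14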
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz',
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so that the next test does not include download time.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n            --output_dir {output_dir}\n            --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n            --sortish_sampler\n            --do_predict\n            --gpus 1\n            --freeze_encoder\n            --n_train 40000\n            --n_val 500\n            --n_test 500\n            --fp16_opt_level O1\n            --num_sanity_val_steps 0\n            --eval_beams 2\n        ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats['val_avg_gen_time'], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['val_avg_bleu'], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith('.ckpt')][0]
        ckpt_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(ckpt_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __snake_case ( self : Any ):
UpperCAmelCase = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
UpperCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
UpperCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(a__ , str(a__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace('''--fp16''' , '''''' )
UpperCAmelCase = 6
UpperCAmelCase = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(a__ , '''argv''' , a__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(a__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(a__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(a__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics['''val'''][0]
UpperCAmelCase = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , a__ )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''')][0]
        ckpt_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(ckpt_path, map_location='''cpu''')
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 51 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    # Step the scheduler `num_steps` times, recording the learning rate at each step.
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    # Same as unwrap_schedule, but halfway through save the scheduler state
    # to disk and reload it, to verify the schedule survives serialization.
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, """schedule.bin""")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
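

# A minimal usage sketch for the two helpers above (not part of the original
# test suite; the model/optimizer/scheduler choices here are illustrative
# assumptions):
if __name__ == "__main__" and is_torch_available():
    _model = nn.Linear(4, 4)
    _optimizer = AdamW(_model.parameters(), lr=1.0)
    _scheduler = get_linear_schedule_with_warmup(_optimizer, num_warmup_steps=2, num_training_steps=10)
    # Records the learning rate at each step: it ramps up over 2 warmup steps,
    # then decays linearly toward 0.
    print(unwrap_schedule(_scheduler, num_steps=10))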
@require_torch
class OptimizationTest(unittest.TestCase):
    """simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
def snake_case_ ( self ):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])

        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def snake_case_ ( self ):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])

        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """simple docstring"""
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
def snake_case_ ( self ):
        common_kwargs = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f'''failed for {scheduler_func} in normal scheduler''', )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    """simple docstring"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 99 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
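

# Usage sketch for the processor class above (illustrative only; the
# checkpoint name is an assumption, not taken from this file):
#
#     processor = SomeProcessor.from_pretrained("org/altclip-like-checkpoint")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#
# When both `text` and `images` are passed, the returned encoding carries the
# tokenizer fields plus a "pixel_values" tensor from the image processor.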
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_A : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 100 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 51 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """simple docstring"""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        """simple docstring"""
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int):
        """simple docstring"""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self):
        """simple docstring"""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def a__ ( A__ = "p107_network.txt" ):
SCREAMING_SNAKE_CASE_ : str = os.path.abspath(os.path.dirname(A__ ) )
SCREAMING_SNAKE_CASE_ : str = os.path.join(A__, A__ )
SCREAMING_SNAKE_CASE_ : dict[EdgeT, int] = {}
SCREAMING_SNAKE_CASE_ : list[str]
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
with open(A__ ) as f:
SCREAMING_SNAKE_CASE_ : Dict = f.read().strip().split('\n' )
SCREAMING_SNAKE_CASE_ : List[Any] = [line.split(',' ) for line in data]
for edgea in range(1, len(A__ ) ):
for edgea in range(A__ ):
if adjaceny_matrix[edgea][edgea] != "-":
SCREAMING_SNAKE_CASE_ : Tuple = int(adjaceny_matrix[edgea][edgea] )
SCREAMING_SNAKE_CASE_ : Graph = Graph(set(range(len(A__ ) ) ), A__ )
SCREAMING_SNAKE_CASE_ : Graph = graph.prims_algorithm()
SCREAMING_SNAKE_CASE_ : int = sum(graph.edges.values() )
SCREAMING_SNAKE_CASE_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
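

# Quick self-check for the Graph/Prim implementation above (illustrative, not
# part of the Project Euler solution itself):
if __name__ == "__main__":
    _g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    # The minimum spanning tree keeps edges (0, 1) and (1, 2): weight 3 of 13.
    assert sum(_g.prims_algorithm().edges.values()) == 3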
if __name__ == "__main__":
print(F"""{solution() = }""")
| 101 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> str:
"""simple docstring"""
UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __snake_case ( SCREAMING_SNAKE_CASE_ : Image ) -> Dict:
"""simple docstring"""
UpperCAmelCase = np.array(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = npimg.shape
return {"hash": hashimage(SCREAMING_SNAKE_CASE_ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
        image_segmenter = pipeline('''mask-generation''', model='''facebook/sam-vit-huge''')

        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''', points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''', model=model_id)

        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''', pred_iou_thresh=1, points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks''']):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__magic_name__ : int = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__magic_name__ : Union[str, Any] = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__magic_name__ : Optional[int] = """|""".join(sys.argv[1:])
__magic_name__ : Union[str, Any] = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
__magic_name__ : Dict = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 102 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , offset: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , offset: bool = True , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
| 51 | 0 |
"""simple docstring"""
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
while a != 0:
_snake_case , _snake_case = b % a, a
return b
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if gcd(lowerCAmelCase_ , lowerCAmelCase_ ) != 1:
_snake_case = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(lowerCAmelCase_ )
_snake_case , _snake_case , _snake_case = 1, 0, a
_snake_case , _snake_case , _snake_case = 0, 1, m
while va != 0:
_snake_case = ua // va
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
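

# Worked example: 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    assert gcd(3, 11) == 1
    assert mod_inverse(3, 11) == 4
    assert (3 * mod_inverse(3, 11)) % 11 == 1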
| 103 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    '''simple docstring'''
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports['''sizes'''].tolist()
        start_token_id = W_supports['''start_token_id'''].item()
        end_token_id = W_supports['''end_token_id'''].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['''input_ids'''] == start_token_id
        end_token_masks = W_supports['''input_ids'''] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
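

# Usage sketch (illustrative; building W_query/W_supports with the matching
# tokenizer conventions is assumed here, not shown):
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)
#
# p_starts/p_ends score each query token as an entity start/end, computed by
# attending query embeddings over the support-set start/end token embeddings.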
| 51 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__=-1 ) -> Optional[int]:
# in NER datasets, the last column is usually reserved for NER label
A__ = label_idx
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[InputExample]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = mode.value
A__ = os.path.join(SCREAMING_SNAKE_CASE__ , f"""{mode}.txt""" )
A__ = 1
A__ = []
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
A__ = []
A__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
guid_index += 1
A__ = []
A__ = []
else:
A__ = line.split(" " )
words.append(splits[0] )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
return examples
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
A__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(SCREAMING_SNAKE_CASE__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(SCREAMING_SNAKE_CASE__ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self ) -> Tuple:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[InputExample]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = mode.value
A__ = os.path.join(SCREAMING_SNAKE_CASE__ , f"""{mode}.txt""" )
A__ = 1
A__ = []
with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f:
for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
A__ = []
A__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) )
guid_index += 1
return examples
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
A__ = 0
for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ):
A__ = preds_list[example_id]
A__ = ""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(SCREAMING_SNAKE_CASE__ )
example_id += 1
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
if path:
with open(SCREAMING_SNAKE_CASE__ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
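

# Sketch of the whitespace-separated CoNLL-style input the NER task expects
# (one "token label" pair per line, blank lines between sentences):
#
#     EU B-ORG
#     rejects O
#     German B-MISC
#
# The Chunk task reads the same layout but uses the second-to-last column,
# while the POS task parses CoNLL-U files via `parse_incr` instead.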
| 104 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }

        config.update(**kwargs)
        return config
def __snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def __snake_case ( self : Optional[int] ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def __snake_case ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def __snake_case ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0_002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0_807 ) < 1e-2
assert abs(result_mean.item() - 0.0_131 ) < 1e-3
def __snake_case ( self : str ):
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(a__ )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(a__ , a__ )
UpperCAmelCase = model(a__ , a__ )
UpperCAmelCase = scheduler.step(a__ , a__ , a__ , generator=a__ )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(a__ ) )
UpperCAmelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1e-3
| 51 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ : List[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    """simple docstring"""
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    """simple docstring"""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    """simple docstring"""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """simple docstring"""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    """simple docstring"""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    """simple docstring"""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    """simple docstring"""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """simple docstring"""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 105 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """simple docstring"""
    return field(default_factory=lambda: default, metadata=metadata)
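

# `list_field` exists because dataclass fields need a default *factory* for
# mutable defaults. Illustrative sketch (the field name is an arbitrary
# example, not part of this script):
#
#     @dataclass
#     class Example:
#         names: Optional[List[str]] = list_field(
#             default=None, metadata={"help": "Optional list of names."}
#         )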
@dataclass
class PlotArguments:
    '''simple docstring'''

    csv_file: str = field(
        metadata={"help": "The csv file to plot."}, )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"}, )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        }, )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}, )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
try:
int(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any ) -> str:
"""simple docstring"""
try:
float(SCREAMING_SNAKE_CASE_ )
return True
except ValueError:
return False
class Plot:
    '''simple docstring'''
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline='''''') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['''model''']
                self.result_dict[model_name]["bsz"].append(int(row['''batch_size''']))
                self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length''']))
                if can_convert_to_int(row['''result''']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size''']), int(row['''sequence_length''']))
                    ] = int(row['''result'''])
                elif can_convert_to_float(row['''result''']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['''batch_size''']), int(row['''sequence_length''']))
                    ] = float(row['''result'''])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = '''Time usage''' if self.args.is_time else '''Memory usage'''
        title_str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('''log''')
            ax.set_yscale('''log''')

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['''bsz''']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['''seq_len''']))
            results = self.result_dict[model_name]['''result''']

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int, )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32, )

                (x_axis_label, inner_loop_label) = (
                    ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, '''--''')

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = '''Time in s''' if self.args.is_time else '''Memory in MB'''

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def __snake_case ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase = Plot(args=SCREAMING_SNAKE_CASE_ )
plot.plot()
if __name__ == "__main__":
main()
| 51 | 0 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__snake_case :str =int(input('Enter number of elements : ').strip())
__snake_case :Optional[int] =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
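
# Worked example: in [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best contiguous run
# is [4, -1, 2, 1] with sum 6 — Kadane's algorithm keeps, at each index, the
# best sum of a subarray ending there:
#
#     assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6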
| 106 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="" , SCREAMING_SNAKE_CASE_ : Dict="." ):
UpperCAmelCase = []
for k, v in d.items():
UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sep=SCREAMING_SNAKE_CASE_ ).items() )
else:
items.append((new_key, v) )
return dict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = argparse.Namespace()
with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as yaml_file:
try:
UpperCAmelCase = yaml.load(SCREAMING_SNAKE_CASE_ , Loader=yaml.FullLoader )
UpperCAmelCase = flatten_yaml_as_dict(SCREAMING_SNAKE_CASE_ )
for k, v in flat_cfg.items():
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(SCREAMING_SNAKE_CASE_ , str(SCREAMING_SNAKE_CASE_ ) ) )
return config
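

# Sketch of what the flattening above does (illustrative input):
#
#     {"model": {"classification": {"name": "mobilevit_v2"}}}
#
# becomes
#
#     {"model.classification.name": "mobilevit_v2"}
#
# so each nested YAML option is later reachable with a single
# getattr(config, "model.classification.name", default) lookup.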
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase = MobileViTVaConfig()
UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = '''pascal-voc-id2label.json'''
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """simple docstring"""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('''.block.''', '''.''')
        if ".conv." in k:
            k_new = k_new.replace('''.conv.''', '''.convolution.''')
        if ".norm." in k:
            k_new = k_new.replace('''.norm.''', '''.normalization.''')

        if "conv_1." in k:
            k_new = k_new.replace('''conv_1.''', f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace('''.exp_1x1.''', '''.expand_1x1.''')
        if ".red_1x1." in k:
            k_new = k_new.replace('''.red_1x1.''', '''.reduce_1x1.''')

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('''pre_norm_attn.0.''', '''layernorm_before.''')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('''pre_norm_attn.1.''', '''attention.''')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('''pre_norm_ffn.0.''', '''layernorm_after.''')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('''pre_norm_ffn.1.''', '''ffn.conv1.''')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('''pre_norm_ffn.3.''', '''ffn.conv2.''')
        if "classifier.1." in k:
            k_new = k_new.replace('''classifier.1.''', '''classifier.''')

        if "seg_head." in k:
            k_new = k_new.replace('''seg_head.''', '''segmentation_head.''')
        if ".aspp_layer." in k:
            k_new = k_new.replace('''.aspp_layer.''', '''.''')
        if ".aspp_pool." in k:
            k_new = k_new.replace('''.aspp_pool.''', '''.''')

        rename_keys.append((k, k_new))
    return rename_keys
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(SCREAMING_SNAKE_CASE_ )
for k in keys_to_ignore:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __snake_case ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
else:
UpperCAmelCase = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
# remove and rename some keys of load the original model
UpperCAmelCase = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
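# Illustration only (the demo dict below is hypothetical, not from the checkpoint):
# each (old, new) pair produced by create_rename_keys is applied by popping the
# tensor out of the state dict under the old key and reinserting it under the new one.
_rename_demo = {"layer_1.1.conv_proj.weight": "tensor"}
_rename_demo["encoder.layer.0.conv_projection.weight"] = _rename_demo.pop("layer_1.1.conv_proj.weight")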
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 51 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Optional[int] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
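# Rough sketch of the lazy-import mechanism (an assumption, not the actual
# transformers implementation): the module object defers importing each submodule
# until one of its exported names is first accessed.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")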
| 107 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="megatron-bert"
def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ):
super().__init__(pad_token_id=a__ , **a__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
| 51 | 0 |
from ...configuration_utils import PretrainedConfig
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''bert-generation'''
def __init__( self : List[Any] , lowerCamelCase : Any=5_0358 , lowerCamelCase : Union[str, Any]=1024 , lowerCamelCase : str=24 , lowerCamelCase : List[str]=16 , lowerCamelCase : Optional[int]=4096 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Dict=512 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Optional[Any]=1E-12 , lowerCamelCase : Any=0 , lowerCamelCase : List[str]=2 , lowerCamelCase : Any=1 , lowerCamelCase : Tuple="absolute" , lowerCamelCase : List[str]=True , **lowerCamelCase : Dict , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
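# Minimal usage sketch (the overrides below are illustrative, not taken from any
# released checkpoint): the config is a plain keyword container, so a small model is
# config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)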
| 108 |
'''simple docstring'''
from __future__ import annotations
a__ : List[str] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , a__ : dict[str, list[str]] , a__ : str ):
UpperCAmelCase = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase = {}
UpperCAmelCase = source_vertex
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = {self.source_vertex}
UpperCAmelCase = None
UpperCAmelCase = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(a__ )
UpperCAmelCase = vertex
queue.append(a__ )
def __snake_case ( self : Any , a__ : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase = self.parent.get(a__ )
if target_vertex_parent is None:
UpperCAmelCase = (
f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(a__ )
return self.shortest_path(a__ ) + f"->{target_vertex}"
if __name__ == "__main__":
a__ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 51 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __a ( _snake_case ):
__UpperCamelCase : List[str] = 'wavlm'
def __init__( self : List[Any] ,lowerCamelCase : List[str]=32 ,lowerCamelCase : Any=768 ,lowerCamelCase : Optional[int]=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : List[str]=3072 ,lowerCamelCase : List[str]="gelu" ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Dict=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Dict=0.0 ,lowerCamelCase : int=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Dict=0.02 ,lowerCamelCase : Optional[int]=1E-5 ,lowerCamelCase : int="group" ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) ,lowerCamelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) ,lowerCamelCase : List[Any]=(10, 3, 3, 3, 3, 2, 2) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Tuple=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : int=320 ,lowerCamelCase : Dict=800 ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Dict=10 ,lowerCamelCase : Optional[int]=2 ,lowerCamelCase : List[str]=0.0 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : str=320 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Dict=100 ,lowerCamelCase : Optional[Any]=256 ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : Dict=0.1 ,lowerCamelCase : List[str]="mean" ,lowerCamelCase : Dict=False ,lowerCamelCase : int=False ,lowerCamelCase : Tuple=256 ,lowerCamelCase : str=(512, 512, 512, 512, 1500) ,lowerCamelCase : List[Any]=(5, 3, 3, 1, 1) ,lowerCamelCase : List[Any]=(1, 2, 3, 1, 1) ,lowerCamelCase : str=512 ,lowerCamelCase : Optional[Any]=80 ,lowerCamelCase : Optional[int]=0 ,lowerCamelCase : Dict=1 ,lowerCamelCase : Any=2 ,lowerCamelCase : List[str]=False ,lowerCamelCase : Optional[Any]=3 ,lowerCamelCase : int=2 ,lowerCamelCase : int=3 ,lowerCamelCase : List[Any]=None ,**lowerCamelCase : str ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_buckets
__SCREAMING_SNAKE_CASE = max_bucket_distance
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_ctc_classes
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# adapter
__SCREAMING_SNAKE_CASE = add_adapter
__SCREAMING_SNAKE_CASE = adapter_kernel_size
__SCREAMING_SNAKE_CASE = adapter_stride
__SCREAMING_SNAKE_CASE = num_adapter_layers
__SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
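# Worked example for the property above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. the feature encoder
# emits one frame per 320 input samples (20 ms at 16 kHz).
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320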
| 109 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
import random
from typing import Any
def lowerCamelCase ( _snake_case ):
for _ in range(len(_snake_case ) ):
UpperCAmelCase__ : str = random.randint(0 ,len(_snake_case ) - 1 )
UpperCAmelCase__ : List[str] = random.randint(0 ,len(_snake_case ) - 1 )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = data[b], data[a]
return data
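# Note: the loop above performs independent random pairwise swaps, which is not
# the textbook Fisher-Yates walk. A minimal unbiased variant (illustrative
# sketch, not part of the original) looks like this:
def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):  # walk from the last index down to 1
        j = random.randint(0, i)  # pick a position at or before i
        data[i], data[j] = data[j], data[i]
    return data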
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 110 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[List[ImageInput]]:
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
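# Behaviour sketch for the batching helper above (the names are illustrative):
# every accepted input is normalized to a list of videos, each a list of frames.
#   frame                  -> [[frame]]
#   [frame_a, frame_b]     -> [[frame_a, frame_b]]   (one video)
#   [[frame_a], [frame_b]] -> unchanged              (already batched)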
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = ["""pixel_values"""]
def __init__( self : int , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Union[str, Any] , ) -> Dict:
super().__init__(**a__ )
lowerCamelCase__ : str = size if size is not None else {'shortest_edge': 256}
lowerCamelCase__ : Union[str, Any] = get_size_dict(a__ , default_to_square=a__ )
lowerCamelCase__ : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase__ : Any = get_size_dict(a__ , param_name='crop_size' )
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : Union[str, Any] = size
lowerCamelCase__ : Union[str, Any] = do_center_crop
lowerCamelCase__ : Optional[Any] = crop_size
lowerCamelCase__ : Any = resample
lowerCamelCase__ : Tuple = do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor
lowerCamelCase__ : Optional[int] = offset
lowerCamelCase__ : Tuple = do_normalize
lowerCamelCase__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> Any:
lowerCamelCase__ : Tuple = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
lowerCamelCase__ : Dict = get_resize_output_image_size(a__ , size['shortest_edge'] , default_to_square=a__ )
elif "height" in size and "width" in size:
lowerCamelCase__ : Any = (size['height'], size['width'])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def A_ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any , ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(a__ , size=(size['height'], size['width']) , data_format=a__ , **a__ )
def A_ ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ) -> Tuple:
lowerCamelCase__ : Optional[Any] = image.astype(np.floataa )
if offset:
lowerCamelCase__ : Optional[Any] = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def A_ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any , ) -> Optional[int]:
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def A_ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> int:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Any = to_numpy_array(a__ )
if do_resize:
lowerCamelCase__ : Optional[int] = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
lowerCamelCase__ : int = self.center_crop(a__ , size=a__ )
if do_rescale:
lowerCamelCase__ : int = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
lowerCamelCase__ : Optional[Any] = self.normalize(image=a__ , mean=a__ , std=a__ )
lowerCamelCase__ : Optional[int] = to_channel_dimension_format(a__ , a__ )
return image
def A_ ( self : List[Any] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Any , ) -> str:
lowerCamelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : List[Any] = resample if resample is not None else self.resample
lowerCamelCase__ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Optional[int] = offset if offset is not None else self.offset
lowerCamelCase__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Tuple = size if size is not None else self.size
lowerCamelCase__ : Any = get_size_dict(a__ , default_to_square=a__ )
lowerCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : Optional[int] = get_size_dict(a__ , param_name='crop_size' )
if not valid_images(a__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
lowerCamelCase__ : Optional[int] = make_batched(a__ )
lowerCamelCase__ : Dict = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
lowerCamelCase__ : Tuple = {'pixel_values': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
| 295 |
'''simple docstring'''
from math import factorial
def __snake_case ( SCREAMING_SNAKE_CASE_ : int = 100 ) -> int:
"""simple docstring"""
return sum(int(SCREAMING_SNAKE_CASE_ ) for x in str(factorial(SCREAMING_SNAKE_CASE_ ) ) )
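# Sanity check (known values): 10! = 3628800, whose digits sum to 27, and the
# default argument of 100 reproduces Project Euler problem 20.
# assert solution(10) == 27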
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 51 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCAmelCase : int = logging.get_logger(__name__)
__lowerCAmelCase : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCAmelCase : str = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase : Any = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _UpperCAmelCase ( ):
"""simple docstring"""
lowerCAmelCase__ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
lowerCAmelCase__ = bs[:]
lowerCAmelCase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(SCREAMING_SNAKE_CASE_ )
cs.append(2**8 + n )
n += 1
lowerCAmelCase__ = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs]
return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
return pairs
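# Example (illustrative) for the pair-extraction helper above: given the word
# ("l", "o", "w") it returns {("l", "o"), ("o", "w")}, the candidate BPE merges.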
class a_ ( UpperCAmelCase_ ):
UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : str = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[int]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : int="</s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : int="<unk>" , snake_case__ : List[Any]="<pad>" , snake_case__ : Any="<mask>" , snake_case__ : List[Any]=False , **snake_case__ : str , ):
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else bos_token
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else sep_token
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else cls_token
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase__ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
errors=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , **a__ , )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ = json.load(a__ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ = errors # how to handle errors in decoding
lowerCAmelCase__ = bytes_to_unicode()
lowerCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ = dict(zip(a__ , range(len(a__ ) ) ) )
lowerCAmelCase__ = {}
lowerCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ = re.compile(R"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(a__ )
lowerCAmelCase__ = get_pairs(a__ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(a__ , key=lambda snake_case__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(a__ ):
try:
lowerCAmelCase__ = word.index(a__ , a__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(a__ )
lowerCAmelCase__ = new_word
if len(a__ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(a__ )
lowerCAmelCase__ = """ """.join(a__ )
lowerCAmelCase__ = word
return word
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str ):
lowerCAmelCase__ = []
for token in re.findall(self.pat , a__ ):
lowerCAmelCase__ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a__ ).split(""" """ ) )
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : int ):
return self.decoder.get(a__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Tuple ):
lowerCAmelCase__ = """""".join(a__ )
lowerCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
if not os.path.isdir(a__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
lowerCAmelCase__ = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase__ = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any]=False , **snake_case__ : Dict ):
lowerCAmelCase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a__ ) > 0 and not text[0].isspace()):
lowerCAmelCase__ = """ """ + text
return (text, kwargs)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
lowerCAmelCase__ = super()._pad(
encoded_inputs=a__ , max_length=a__ , padding_strategy=a__ , pad_to_multiple_of=a__ , return_attention_mask=a__ , )
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase__ = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowerCAmelCase__ = len(encoded_inputs["""global_attention_mask"""] ) != len(a__ )
if needs_to_be_padded:
lowerCAmelCase__ = len(a__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase__ = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase__ = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 644 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
UpperCAmelCase = 32
UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        UpperCAmelCase = pipe('''anime turtle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['MobileNetV2FeatureExtractor']
lowercase_ = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 154 |
'''simple docstring'''
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Dict:
"""simple docstring"""
if not head:
return True
# split the list to two parts
UpperCAmelCase, UpperCAmelCase = head.next, head
while fast and fast.next:
UpperCAmelCase = fast.next.next
UpperCAmelCase = slow.next
UpperCAmelCase = slow.next
    UpperCAmelCase = None  # cut the link between the two halves (the check still works without this)
# reverse the second part
UpperCAmelCase = None
while second:
UpperCAmelCase = second.next
UpperCAmelCase = node
UpperCAmelCase = second
UpperCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase = node.next
UpperCAmelCase = head.next
return True
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase = UpperCAmelCase = UpperCAmelCase = head
while fast and fast.next:
UpperCAmelCase, UpperCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase = [slow.val]
while slow.next:
UpperCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase = cur.next
return True
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
"""simple docstring"""
if not head or not head.next:
return True
UpperCAmelCase = {}
UpperCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase = [pos]
UpperCAmelCase = head.next
pos += 1
UpperCAmelCase = pos - 1
UpperCAmelCase = 0
for v in d.values():
if len(SCREAMING_SNAKE_CASE_ ) % 2 != 0:
middle += 1
else:
UpperCAmelCase = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
if v[i] + v[len(SCREAMING_SNAKE_CASE_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
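# Worked example for the dict-based check above (illustrative): for the list
# 1 -> 2 -> 1 the position map is {1: [0, 2], 2: [1]} and checksum = 2; the
# even-length list pairs 0 + 2 == 2, and the single odd-length list is the
# one allowed middle element.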
| 51 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_lowercase = input('Enter image url: ').strip()
print(f'Downloading image from {url} ...')
_lowercase = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
_lowercase = soup.find('meta', {'property': 'og:image'})['content']
_lowercase = requests.get(image_url).content
_lowercase = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 342 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
UpperCAmelCase = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
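# Minimal usage sketch (the checkpoint id below is an assumption): the processor
# wraps tokenizer and image processor behind a single call.
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")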
| 51 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = '''biogpt'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[Any]=4_2384 , UpperCAmelCase_ : List[Any]=1024 , UpperCAmelCase_ : Optional[Any]=24 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : str=4096 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=1024 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Any=1E-12 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : List[str]=2 , **UpperCAmelCase_ : Any , ):
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = scale_embedding
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE : Tuple = layerdrop
SCREAMING_SNAKE_CASE : Any = activation_dropout
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
| 62 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single story item from the Hacker News API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Resolve the IDs of the current top stories into full story items."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a Markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
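    # Illustrative shape of one story item returned by the HN API (subset of
    # fields, taken from the example item in https://github.com/HackerNews/API):
    #     {"id": 8863, "by": "dhouston", "score": 104, "type": "story",
    #      "title": "My YC app: Dropbox - Throw away your USB drive",
    #      "url": "http://www.getdropbox.com/u/2/screencast.html"}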
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def get_yolos_config( yolos_name : str ) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = '''huggingface/label-files'''
    filename = '''coco-detection-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
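# Sanity-check sketch for the mapping above (requires Hub access for the COCO
# label file): the tiny variant should come out with hidden_size 192 and the
# 91 COCO detection labels.
#
#     config = get_yolos_config("yolos_ti")
#     assert config.hidden_size == 192 and config.num_labels == 91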
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
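# Shape sketch for the split above: timm stores the fused projection as one
# (3 * hidden_size, hidden_size) matrix, so for hidden_size H the row blocks
# [0:H], [H:2H] and [2H:3H] become the separate query, key and value weights.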
def rename_key( name : str ) -> str:
"""simple docstring"""
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
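# Worked example of the renaming chain above (each substring rule fires in
# order): "blocks.0.attn.proj.weight"
#     -> "encoder.layer.0.attn.proj.weight"              ("blocks" rule)
#     -> "encoder.layer.0.attention.output.dense.weight" ("attn.proj" rule)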
def convert_state_dict( orig_state_dict : dict , model : YolosForObjectDetection ) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img( ) -> Image.Image:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ) -> None:
    """simple docstring"""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != '''yolos_ti''' else 512
    image_processor = YolosImageProcessor(format='''coco_detection''' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='''hustvl''' )
        model.push_to_hub(model_name , organization='''hustvl''' )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__ : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
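# Example invocation (sketch; the script file name and paths are placeholders):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path /path/to/output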
| 51 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, set the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
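    # Windowing sketch for the defaults above: with look_back=10 and
    # forward_days=5, each sample maps 10 past values to the next 5, so a
    # split of length L yields L - look_back - forward_days + 1 windows.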
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
a__ : int =Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
    pred = model.predict(x_test)
| 399 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a__ : List[Any] = logging.get_logger(__name__)
a__ : int = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
a__ : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a__ : Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on
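# The two ID lists above are the non-speech token IDs that Whisper suppresses
# during generation (upstream these are NON_SPEECH_TOKENS for the English-only
# vocab and NON_SPEECH_TOKENS_MULTI for the multilingual one); they feed the
# `suppress_tokens` machinery configured in the class below.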
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="whisper"
_lowerCamelCase =["past_key_values"]
_lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , a__ : Any=51865 , a__ : Any=80 , a__ : Dict=6 , a__ : int=4 , a__ : int=6 , a__ : str=4 , a__ : int=1536 , a__ : Optional[Any]=1536 , a__ : str=0.0 , a__ : Optional[int]=0.0 , a__ : Optional[int]=50257 , a__ : int=True , a__ : Optional[int]=True , a__ : str="gelu" , a__ : List[str]=256 , a__ : Union[str, Any]=0.0 , a__ : List[str]=0.0 , a__ : Any=0.0 , a__ : str=0.02 , a__ : str=False , a__ : int=1500 , a__ : Tuple=448 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Optional[Any]=50256 , a__ : Tuple=None , a__ : List[Any]=[220, 50256] , a__ : Optional[int]=False , a__ : Optional[Any]=256 , a__ : Any=False , a__ : int=0.05 , a__ : Optional[Any]=10 , a__ : Dict=2 , a__ : Optional[Any]=0.0 , a__ : Tuple=10 , a__ : Any=0 , a__ : str=7 , **a__ : Any , ):
UpperCAmelCase = vocab_size
UpperCAmelCase = num_mel_bins
UpperCAmelCase = d_model
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = max_source_positions
UpperCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
UpperCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = apply_spec_augment
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
UpperCAmelCase = median_filter_width
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , suppress_tokens=a__ , begin_suppress_tokens=a__ , **a__ , )
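# Attribute aliasing sketch: via the attribute_map defined on the config class
# above, generic names resolve to Whisper-specific ones on any instance `cfg`:
#     cfg.hidden_size         -> cfg.d_model
#     cfg.num_attention_heads -> cfg.encoder_attention_heads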
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def __snake_case ( self : List[str] ):
UpperCAmelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCAmelCase = {0: '''batch'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction='''inputs''' )
return common_inputs
def __snake_case ( self : str , a__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a__ : int = -1 , a__ : int = -1 , a__ : bool = False , a__ : Optional["TensorType"] = None , a__ : int = 22050 , a__ : float = 5.0 , a__ : int = 220 , ):
UpperCAmelCase = OrderedDict()
UpperCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=a__ , framework=a__ , sampling_rate=a__ , time_duration=a__ , frequency=a__ , )
UpperCAmelCase = encoder_inputs['''input_features'''].shape[2]
UpperCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , a__ , a__ , a__ , a__ )
UpperCAmelCase = encoder_inputs.pop('''input_features''' )
UpperCAmelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def __snake_case ( self : Dict ):
return 1e-3
| 51 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCAmelCase :List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a__ =['''pixel_values''']
def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , A = True , **A , ) -> Optional[int]:
super().__init__(**a__ )
_UpperCAmelCase : str = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
_UpperCAmelCase : Dict = get_size_dict(a__ , default_to_square=a__ )
_UpperCAmelCase : Union[str, Any] = do_resize
_UpperCAmelCase : List[Any] = size
_UpperCAmelCase : Dict = resample
_UpperCAmelCase : Optional[int] = do_rescale
_UpperCAmelCase : List[Any] = rescale_factor
_UpperCAmelCase : Any = do_normalize
_UpperCAmelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase : List[str] = do_convert_rgb
def __lowerCAmelCase ( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> Union[str, Any]:
_UpperCAmelCase : Dict = get_size_dict(a__ , default_to_square=a__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
_UpperCAmelCase : List[Any] = (size['''height'''], size['''width'''])
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __lowerCAmelCase ( self , A , A , A , A = None , **A , ) -> str:
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> Optional[Any]:
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : int = get_size_dict(a__ , default_to_square=a__ )
_UpperCAmelCase : Union[str, Any] = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase : List[Any] = [convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase : Union[str, Any] = [to_numpy_array(a__ ) for image in images]
if do_resize:
_UpperCAmelCase : Any = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_rescale:
_UpperCAmelCase : Tuple = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_UpperCAmelCase : Optional[Any] = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_UpperCAmelCase : Any = [to_channel_dimension_format(a__ , a__ ) for image in images]
_UpperCAmelCase : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=a__ )
return encoded_outputs
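# Usage sketch (the class and method names above are obfuscated; upstream this
# is the BLIP image processor, and calling the processor dispatches to its
# preprocess method). With the defaults, one PIL image becomes a single batched
# tensor resized to the `size` dict:
#
#     processor = _UpperCAmelCase()
#     batch = processor.preprocess(images=pil_image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 384, 384])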
| 506 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =LEDConfig
_lowerCamelCase ={}
_lowerCamelCase ="gelu"
def __init__( self : Tuple , a__ : Any , a__ : int=13 , a__ : List[Any]=7 , a__ : int=True , a__ : Union[str, Any]=False , a__ : Tuple=99 , a__ : Any=32 , a__ : List[Any]=2 , a__ : Any=4 , a__ : List[Any]=37 , a__ : List[Any]=0.1 , a__ : Any=0.1 , a__ : Optional[int]=20 , a__ : List[Any]=2 , a__ : Union[str, Any]=1 , a__ : List[Any]=0 , a__ : Union[str, Any]=4 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
UpperCAmelCase = global_attention_mask
return config, inputs_dict
def __snake_case ( self : Optional[int] , a__ : List[str] , a__ : int ):
UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1e-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
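# Mask-derivation sketch for the helper above: the attention mask is inferred
# from padding, e.g. input_ids [[5, 7, pad_token_id]] -> attention_mask
# [[1, 1, 0]], while the decoder mask always keeps position 0 visible.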
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase =(TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase =(
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase =True
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = TFLEDModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __snake_case ( self : int ):
self.config_tester.run_common_tests()
def __snake_case ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = tf.zeros_like(inputs_dict['''attention_mask'''] )
UpperCAmelCase = 2
UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
UpperCAmelCase = True
UpperCAmelCase = self.model_tester.seq_length
UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ : Tuple ):
UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ : int ):
UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __snake_case ( self : Any ):
pass
def __snake_case ( self : Union[str, Any] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor( tok_lst ) -> tf.Tensor:
    """simple docstring"""
    return tf.constant(tok_lst , dtype=tf.int32 )
a__ : int = 1e-4
@slow
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, 768)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 )
def __snake_case ( self : str ):
UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
UpperCAmelCase = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
UpperCAmelCase = model(**a__ )[0]
UpperCAmelCase = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1e-3 , rtol=1e-3 )
| 51 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , a : str , a : Union[str, Any]=13 , a : Any=32 , a : int=2 , a : str=3 , a : List[Any]=16 , a : Tuple=[1, 2, 1] , a : Union[str, Any]=[2, 2, 4] , a : Optional[int]=2 , a : str=2.0 , a : int=True , a : Any=0.0 , a : Optional[Any]=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : Tuple=True , a : int=0.02 , a : Tuple=1E-5 , a : str=True , a : Optional[Any]=None , a : List[str]=True , a : Tuple=10 , a : int=8 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
def SCREAMING_SNAKE_CASE_ ( self : Any )-> str:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = SwinvaModel(config=a__ )
model.to(a__ )
model.eval()
lowercase__ = model(a__ )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
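    # Arithmetic sketch for the defaults above: image_size=32 with patch_size=2
    # gives 16 * 16 = 256 patches; len(depths)=3 stages downsample twice, so
    # 256 // 4**2 = 16 final tokens with embed_dim * 2**2 = 64 channels.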
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Optional[int] , a : Union[str, Any] , a : int )-> Any:
"""simple docstring"""
lowercase__ = SwinvaForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
lowercase__ = model(a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = SwinvaForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Dict , a : Tuple , a : int )-> List[Any]:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = SwinvaForImageClassification(a__ )
model.to(a__ )
model.eval()
lowercase__ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : int = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_UpperCamelCase : Tuple = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : str = False
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ = SwinvaModelTester(self )
lowercase__ = ConfigTester(self , config_class=a__ , embed_dim=37 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowercase__ = outputs.attentions
lowercase__ = len(self.model_tester.depths )
self.assertEqual(len(a__ ) , a__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ = True
lowercase__ = config.window_size**2
lowercase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowercase__ = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowercase__ = len(a__ )
# Check attention is always last and order is fine
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(a__ , a__ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowercase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ = 2
self.assertEqual(out_len + added_hidden_states , len(a__ ) )
lowercase__ = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : List[str] , a : Any , a : Tuple )-> Optional[int]:
"""simple docstring"""
lowercase__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(a__ , a__ ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# Swinv2 has a different seq_length
lowercase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase__ = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = reshaped_hidden_states[0].shape
lowercase__ = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = SwinvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a__ )
lowercase__ = self.default_image_processor
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase__ = image_processor(images=a__ , return_tensors='pt' ).to(a__ )
# forward pass
with torch.no_grad():
lowercase__ = model(**a__ )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a__ )
lowercase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
| 235 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
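# Lazy-import sketch: with _LazyModule, submodules are resolved on first
# attribute access, so e.g.
#     from transformers.models.xlnet import XLNetConfig  # cheap, config only
#     from transformers.models.xlnet import XLNetModel   # first use pulls in torch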
| 51 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
SCREAMING_SNAKE_CASE_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class a ( UpperCAmelCase_ , unittest.TestCase ):
_lowercase = MaMaaaTokenizer
_lowercase = False
_lowercase = False
_lowercase = True
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Optional[int] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_UpperCAmelCase : Union[str, Any] = dict(zip(a__ , range(len(a__ ) ) ) )
_UpperCAmelCase : Union[str, Any] = Path(self.tmpdirname )
save_json(a__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_UpperCAmelCase : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = "</s>"
_UpperCAmelCase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(a__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [2, 3, 4, 5, 6] , )
_UpperCAmelCase : int = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(a__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_UpperCAmelCase : Dict = tokenizer.convert_tokens_to_string(a__ )
self.assertEqual(a__ , "This is a test" )
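    # Note on the "▁" (U+2581) pieces above: SentencePiece marks word
    # boundaries with this meta symbol, so detokenization is simply
    # concatenation followed by replacing "▁" with a space.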
@slow
def _UpperCAmelCase ( self ):
        '''simple docstring'''
        # fmt: off
_UpperCAmelCase : Optional[int] = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
@classmethod
    def setUpClass(cls):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128063 )
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en") , vocab )
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="pt" )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[128022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128006,
} , )
| 300 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
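# The tests below exercise the fine-tuning and distillation bash scripts end to end on a
# small en-ro translation dataset; they are slow and require a GPU (see the decorators).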
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '''$MAX_LEN''': 64,
            '''$BS''': 64,
            '''$GAS''': 1,
            '''$ENRO_DIR''': self.data_dir,
            '''facebook/mbart-large-cc25''': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
            '''--num_train_epochs 6''': '''--num_train_epochs 1''',
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )

        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
        self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )
        self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            '''--fp16_opt_level=O1''': '''''',
            '''$MAX_LEN''': 128,
            '''$BS''': 16,
            '''$GAS''': 1,
            '''$ENRO_DIR''': data_dir,
            '''$m''': '''sshleifer/student_marian_en_ro_6_1''',
            '''val_check_interval=0.25''': '''val_check_interval=1.0''',
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        bash_script = bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('''--fp16''' , '''''' )
        epochs = 6
        testargs = (
            ['''distillation.py''']
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                '''--gpus=1''',
                '''--learning_rate=1e-3''',
                f"--num_train_epochs={epochs}",
                '''--warmup_steps=10''',
                '''--val_check_interval=1.0''',
                '''--do_predict''',
            ]
        )
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )

        # Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
        assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , float )

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_name = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_name )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
| 51 | 0 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
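# Conversion flow: load the pickled trax weight tree, copy every attention and
# feed-forward block into the HF `ReformerModelWithLMHead`, then save a regular
# PyTorch state dict. Each helper below handles one level of that tree.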
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_self_output = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_self_output).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_self_output = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_self_output).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, 'rb') as f:
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
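# Illustrative invocation of this script (the paths are placeholders, not shipped fixtures):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin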
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 265 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
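# Minimal usage sketch (the checkpoint name is an assumption, shown for illustration only):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")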
| 51 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
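# Generation is stopped once one of these strings appears after the prompt: a new
# top-level construct (class/def/comment/decorator/print/if) signals that the
# generated function body is complete.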
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of generated code that starts with an EOF string."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per task and group the generated tokens by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['ids'][:, : batch['input_len']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.')
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = F"""check({human_eval["test"][task]["entry_point"]})"""
            references.append('\n' + test_func + '\n' + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers)
        print(F"""Results: {pass_at_k}""")
        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
if __name__ == "__main__":
main()
| 295 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
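# The try/except above keeps `import` working when torch/transformers are absent or too
# old: the dummy classes are importable but raise on use instead of failing at import time.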
| 51 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty lines."""
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camelcased `identifier` into words."""
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == """✅""" or text == """❌""" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generates an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""", """""") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer"""):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast"""):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = """|""" + """|""".join([_center_text(c, w) for c, w in zip(columns, widths)]) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths]) + "|\n"

    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
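# The generated markdown renders roughly like this (illustrative row, not real output):
# |   Model   | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
# |   BERT    |       ✅       |       ✅       |        ✅        |         ✅         |      ✅      |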
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, """index.md"""), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, """index.md"""), """w""", encoding="""utf-8""", newline="""\n""") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 644 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''' , model=model_id )
        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51 | 0 |
from ..utils import DummyObject, requires_backends
# Dummy pipeline classes: importable placeholders that raise on use when the
# torch/transformers/onnx backends are not installed.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''transformers''', '''onnx'''])
| 154 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['''shortest_edge'''] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {'''pixel_values''': videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
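# Minimal usage sketch (illustrative; `video` is assumed to be a list of PIL frames):
#   image_processor = VivitImageProcessor()
#   batch = image_processor(video, return_tensors="pt")  # BatchFeature with "pixel_values"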
| 51 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = '''altclip_text_model'''

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = '''altclip_vision_model'''

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict , **kwargs )
class _lowercase ( UpperCAmelCase_ ):
_UpperCAmelCase = '''altclip'''
_UpperCAmelCase = True
def __init__( self , A__=None , A__=None , A__=7_68 , A__=2.6_5_9_2 , **A__ ) -> List[Any]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
snake_case = kwargs.pop('''text_config_dict''' , a__ )
snake_case = kwargs.pop('''vision_config_dict''' , a__ )
super().__init__(**a__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
snake_case = {}
# This is the complete result when using `text_config_dict`.
snake_case = AltCLIPTextConfig(**a__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
snake_case = (
F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
F"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case = (
F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
F"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(a__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
snake_case = {}
# This is the complete result when using `vision_config_dict`.
snake_case = AltCLIPVisionConfig(**a__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
snake_case = {
str(a__ ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
snake_case = (
F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
snake_case = (
F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
F"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(a__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
snake_case = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
snake_case = {}
            logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''')
snake_case = AltCLIPTextConfig(**a__ )
snake_case = AltCLIPVisionConfig(**a__ )
snake_case = projection_dim
snake_case = logit_scale_init_value
snake_case = 1.0
@classmethod
def UpperCamelCase ( cls , A__ , A__ , **A__ ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def UpperCamelCase ( self ) -> Any:
snake_case = copy.deepcopy(self.__dict__ )
snake_case = self.text_config.to_dict()
snake_case = self.vision_config.to_dict()
snake_case = self.__class__.model_type
return output
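# A minimal usage sketch, assuming the three classes above correspond to transformers'
# AltCLIPTextConfig / AltCLIPVisionConfig / AltCLIPConfig (the bodies reference those
# names directly):
#
#     config = AltCLIPConfig(projection_dim=768)
#     config.text_config.vocab_size    # 250002 by default
#     config.vision_config.image_size  # 224 by default
#     config.to_dict()["model_type"]   # "altclip"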
| 342 |
'''simple docstring'''

import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    '''simple docstring'''

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=False)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_supports, W_query):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed query and support sentences with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
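# A hedged usage sketch (tensor layout follows the sayef/fsner convention; the
# tokenization that builds `W_query` / `W_supports`, including the "sizes",
# "start_token_id" and "end_token_id" bookkeeping tensors, is assumed, not shown):
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_supports, W_query)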
| 51 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : List[Any]=[10, 20, 30, 40] , UpperCAmelCase_ : Dict=[1, 1, 2, 1] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[str]="relu" , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Union[str, Any]=None , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = embeddings_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Dict = depths
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a__ )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def _A ( self : Any ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _A ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Any = RegNetModel(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = RegNetForImageClassification(a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : int = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Optional[Any] = False
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Any = RegNetModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def _A ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self : Optional[int] ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _A ( self : Optional[int] ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _A ( self : Tuple ):
pass
def _A ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(config=a__ )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _A ( self : Dict ):
def check_hidden_states_output(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Optional[int] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE : Optional[int] = layer_type
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(a__ , a__ , a__ )
def _A ( self : str ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def _A ( self : str ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = RegNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _A ( self : int ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**a__ )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
| 62 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
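# The full-loop tests above mirror the standard manual sampling recipe; outside the
# test harness the same loop reads as follows (sketch only -- `unet` stands for a
# caller-supplied noise-prediction model and is not defined here):
#
#     scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample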
| 51 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[str] = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ : int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
SCREAMING_SNAKE_CASE_ : List[str] = BlipProcessor(a__ , a__ )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).tokenizer
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]

        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = BlipProcessor(tokenizer=a__ , image_processor=a__ )
SCREAMING_SNAKE_CASE_ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(a__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Any = processor(images=a__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = BlipProcessor(tokenizer=a__ , image_processor=a__ )
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Any = processor(text=a__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(a__ , return_token_type_ids=a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = BlipProcessor(tokenizer=a__ , image_processor=a__ )
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : str = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = BlipProcessor(tokenizer=a__ , image_processor=a__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[Any] = processor.batch_decode(a__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = BlipProcessor(tokenizer=a__ , image_processor=a__ )
SCREAMING_SNAKE_CASE_ : int = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = processor(text=a__ , images=a__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 101 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    '''simple docstring'''

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    '''simple docstring'''

    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
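# A usage sketch (file names illustrative). Given a benchmark CSV with the columns
# model,batch_size,sequence_length,result, for example:
#
#     model,batch_size,sequence_length,result
#     bert-base-uncased,8,128,0.026
#     bert-base-uncased,8,512,0.092
#
# the script is invoked as:
#
#     python plot_csv_file.py --csv_file time.csv --figure_png_file plot.png --is_time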
| 51 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : int , __A : int , __A : int , __A : str=0.0 , __A : Optional[int] = None , __A : str = "geglu" , __A : Optional[int] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : str = "layer_norm" , __A : bool = False , ):
super().__init__()
__UpperCamelCase = only_cross_attention
__UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
__UpperCamelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__UpperCamelCase = AdaLayerNorm(a__ , a__ )
elif self.use_ada_layer_norm_zero:
__UpperCamelCase = AdaLayerNormZero(a__ , a__ )
else:
__UpperCamelCase = nn.LayerNorm(a__ , elementwise_affine=a__ )
__UpperCamelCase = Attention(
query_dim=a__ , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__UpperCamelCase = (
AdaLayerNorm(a__ , a__ )
if self.use_ada_layer_norm
else nn.LayerNorm(a__ , elementwise_affine=a__ )
)
__UpperCamelCase = Attention(
query_dim=a__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , upcast_attention=a__ , ) # is self-attn if encoder_hidden_states is none
else:
__UpperCamelCase = None
__UpperCamelCase = None
# 3. Feed-forward
__UpperCamelCase = nn.LayerNorm(a__ , elementwise_affine=a__ )
__UpperCamelCase = FeedForward(a__ , dropout=a__ , activation_fn=a__ , final_dropout=a__ )
# let chunk size default to None
__UpperCamelCase = None
__UpperCamelCase = 0
def _lowerCamelCase ( self : Tuple , __A : Optional[int] , __A : int ):
# Sets chunk feed-forward
__UpperCamelCase = chunk_size
__UpperCamelCase = dim
def _lowerCamelCase ( self : Optional[Any] , __A : torch.FloatTensor , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.LongTensor] = None , __A : Dict[str, Any] = None , __A : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__UpperCamelCase = self.norma(a__ , a__ )
elif self.use_ada_layer_norm_zero:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.norma(
a__ , a__ , a__ , hidden_dtype=hidden_states.dtype )
else:
__UpperCamelCase = self.norma(a__ )
__UpperCamelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__UpperCamelCase = self.attna(
a__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a__ , **a__ , )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = gate_msa.unsqueeze(1 ) * attn_output
__UpperCamelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__UpperCamelCase = (
self.norma(a__ , a__ ) if self.use_ada_layer_norm else self.norma(a__ )
)
__UpperCamelCase = self.attna(
a__ , encoder_hidden_states=a__ , attention_mask=a__ , **a__ , )
__UpperCamelCase = attn_output + hidden_states
# 3. Feed-forward
__UpperCamelCase = self.norma(a__ )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
__UpperCamelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__UpperCamelCase = torch.cat(
[self.ff(a__ ) for hid_slice in norm_hidden_states.chunk(a__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__UpperCamelCase = self.ff(a__ )
if self.use_ada_layer_norm_zero:
__UpperCamelCase = gate_mlp.unsqueeze(1 ) * ff_output
__UpperCamelCase = ff_output + hidden_states
return hidden_states
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __A : int , __A : Optional[int] = None , __A : int = 4 , __A : float = 0.0 , __A : str = "geglu" , __A : bool = False , ):
super().__init__()
__UpperCamelCase = int(dim * mult )
__UpperCamelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__UpperCamelCase = GELU(a__ , a__ )
if activation_fn == "gelu-approximate":
__UpperCamelCase = GELU(a__ , a__ , approximate='tanh' )
elif activation_fn == "geglu":
__UpperCamelCase = GEGLU(a__ , a__ )
elif activation_fn == "geglu-approximate":
__UpperCamelCase = ApproximateGELU(a__ , a__ )
__UpperCamelCase = nn.ModuleList([] )
# project in
self.net.append(a__ )
# project dropout
self.net.append(nn.Dropout(a__ ) )
# project out
self.net.append(nn.Linear(a__ , a__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(a__ ) )
def _lowerCamelCase ( self : Any , __A : List[Any] ):
for module in self.net:
__UpperCamelCase = module(a__ )
return hidden_states
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __A : int , __A : int , __A : str = "none" ):
super().__init__()
__UpperCamelCase = nn.Linear(a__ , a__ )
__UpperCamelCase = approximate
def _lowerCamelCase ( self : List[str] , __A : Optional[int] ):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def _lowerCamelCase ( self : Union[str, Any] , __A : List[Any] ):
__UpperCamelCase = self.proj(a__ )
__UpperCamelCase = self.gelu(a__ )
return hidden_states
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __A : int , __A : int ):
super().__init__()
__UpperCamelCase = nn.Linear(a__ , dim_out * 2 )
def _lowerCamelCase ( self : List[Any] , __A : List[str] ):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def _lowerCamelCase ( self : Dict , __A : Optional[Any] ):
__UpperCamelCase , __UpperCamelCase = self.proj(a__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(a__ )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __A : int , __A : int ):
super().__init__()
__UpperCamelCase = nn.Linear(a__ , a__ )
def _lowerCamelCase ( self : Optional[int] , __A : Union[str, Any] ):
__UpperCamelCase = self.proj(a__ )
return x * torch.sigmoid(1.702 * x )
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Tuple , __A : str ):
super().__init__()
__UpperCamelCase = nn.Embedding(a__ , a__ )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = nn.Linear(a__ , embedding_dim * 2 )
__UpperCamelCase = nn.LayerNorm(a__ , elementwise_affine=a__ )
def _lowerCamelCase ( self : int , __A : Union[str, Any] , __A : Optional[int] ):
__UpperCamelCase = self.linear(self.silu(self.emb(a__ ) ) )
__UpperCamelCase , __UpperCamelCase = torch.chunk(a__ , 2 )
__UpperCamelCase = self.norm(a__ ) * (1 + scale) + shift
return x
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __A : List[str] , __A : List[str] ):
super().__init__()
__UpperCamelCase = CombinedTimestepLabelEmbeddings(a__ , a__ )
__UpperCamelCase = nn.SiLU()
__UpperCamelCase = nn.Linear(a__ , 6 * embedding_dim , bias=a__ )
__UpperCamelCase = nn.LayerNorm(a__ , elementwise_affine=a__ , eps=1e-6 )
def _lowerCamelCase ( self : List[str] , __A : str , __A : List[str] , __A : List[str] , __A : int=None ):
__UpperCamelCase = self.linear(self.silu(self.emb(a__ , a__ , hidden_dtype=a__ ) ) )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = emb.chunk(6 , dim=1 )
__UpperCamelCase = self.norm(a__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __A : int , __A : int , __A : int , __A : Optional[str] = None , __A : float = 1e-5 ):
super().__init__()
__UpperCamelCase = num_groups
__UpperCamelCase = eps
if act_fn is None:
__UpperCamelCase = None
else:
__UpperCamelCase = get_activation(a__ )
__UpperCamelCase = nn.Linear(a__ , out_dim * 2 )
def _lowerCamelCase ( self : List[Any] , __A : Optional[int] , __A : int ):
if self.act:
__UpperCamelCase = self.act(a__ )
__UpperCamelCase = self.linear(a__ )
__UpperCamelCase = emb[:, :, None, None]
__UpperCamelCase , __UpperCamelCase = emb.chunk(2 , dim=1 )
__UpperCamelCase = F.group_norm(a__ , self.num_groups , eps=self.eps )
__UpperCamelCase = x * (1 + scale) + shift
return x
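# The adaptive norms above (AdaLayerNorm, AdaLayerNormZero, AdaGroupNorm) all apply
# the same FiLM-style modulation x * (1 + scale) + shift; a self-contained check:
#
#     x = torch.randn(2, 8, 4, 4)
#     scale = torch.zeros(2, 8, 1, 1)
#     shift = torch.zeros(2, 8, 1, 1)
#     assert torch.equal(x * (1 + scale) + shift, x)  # identity when scale = shift = 0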
| 399 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """simple docstring"""
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
"""simple docstring"""
    UpperCAmelCase = MobileViTV2Config()
UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
UpperCAmelCase = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
UpperCAmelCase = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
UpperCAmelCase = 384
else:
UpperCAmelCase = 256
UpperCAmelCase = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
UpperCAmelCase = 151
UpperCAmelCase = 512
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
UpperCAmelCase = 21
UpperCAmelCase = 512
UpperCAmelCase = '''pascal-voc-id2label.json'''
UpperCAmelCase = True
# orig_config
UpperCAmelCase = load_orig_config_file(SCREAMING_SNAKE_CASE_ )
assert getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=False ) -> int:
"""simple docstring"""
if base_model:
UpperCAmelCase = ''''''
else:
UpperCAmelCase = '''mobilevitv2.'''
UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase = k[8:]
else:
UpperCAmelCase = k
if ".block." in k:
UpperCAmelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
UpperCAmelCase = k_new.replace('''conv_1.''' , f"{model_prefix}conv_stem." )
for i in [1, 2]:
if f"layer_{i}." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"layer_{i}.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if f"layer_{i}.1.local_rep.0." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if f"layer_{i}.1.local_rep.1." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase = [0, 1]
elif i == 4:
UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase = [0, 1, 2]
for j in j_in:
if f"layer_{i}.1.global_rep.{j}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if f"layer_{i}.1.global_rep.{j+1}." in k:
UpperCAmelCase = k_new.replace(
f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
if f"layer_{i}.1.conv_proj." in k:
UpperCAmelCase = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys(state_dict):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = get_mobilevitva_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
        UpperCAmelCase = MobileViTV2ForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
else:
        UpperCAmelCase = MobileViTV2ForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = False
# remove and rename some keys of load the original model
UpperCAmelCase = checkpoint
remove_unused_keys(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load modified state_dict
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
UpperCAmelCase = outputs.logits
UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
a__ : str = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
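# A usage sketch (script and file names illustrative):
#
#     python convert_mobilevitv2_checkpoint.py \
#         --task imagenet1k_256 \
#         --orig_checkpoint_path mobilevitv2-1.0.pt \
#         --orig_config_path mobilevitv2-1.0.yaml \
#         --pytorch_dump_folder_path ./mobilevitv2-1.0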
| 51 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase :Dict = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase ( UpperCAmelCase_ ,UpperCAmelCase_ ):
'''simple docstring'''
a__ ='''nat'''
a__ ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , A=4 , A=3 , A=6_4 , A=[3, 4, 6, 5] , A=[2, 4, 8, 1_6] , A=7 , A=3.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=0.02 , A=1E-5 , A=0.0 , A=None , A=None , **A , ) -> List[str]:
super().__init__(**a__ )
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Optional[Any] = embed_dim
_UpperCAmelCase : Tuple = depths
_UpperCAmelCase : List[str] = len(a__ )
_UpperCAmelCase : Dict = num_heads
_UpperCAmelCase : Tuple = kernel_size
_UpperCAmelCase : int = mlp_ratio
_UpperCAmelCase : Optional[Any] = qkv_bias
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : int = drop_path_rate
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Any = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase : Optional[int] = int(embed_dim * 2 ** (len(a__ ) - 1) )
_UpperCAmelCase : List[Any] = layer_scale_init_value
_UpperCAmelCase : Optional[int] = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(a__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=a__ , out_indices=a__ , stage_names=self.stage_names )
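# A minimal instantiation sketch, assuming the class above is transformers' NatConfig
# (the "nat" model_type and the attribute map suggest so):
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#     config.hidden_size  # 512 == embed_dim * 2 ** (len(depths) - 1)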
| 506 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="megatron-bert"
def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ):
super().__init__(pad_token_id=a__ , **a__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
| 51 | 0 |
def infix_2_postfix (infix ) -> str:
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    } # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8 ) , 'Stack'.center(print_width ) , 'Postfix'.center(print_width ) , sep=' | ' , )
    print('-' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x ) # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x ) # if x is "(" push to Stack
        elif x == ")": # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x ) # If stack is empty, push x to stack
            else: # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() ) # pop stack & add to Postfix
                stack.append(x ) # push x to stack
        print(
            x.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , ) # Output in tabular format
    while len(stack ) > 0: # while stack is not empty
        post_fix.append(stack.pop() ) # pop stack & add to Postfix
        print(
            ' '.center(8 ) , (''.join(stack )).ljust(print_width ) , (''.join(post_fix )).ljust(print_width ) , sep=' | ' , ) # Output in tabular format
    return "".join(post_fix ) # return Postfix as str
def infix_2_prefix (infix ) -> str:
    reversed_infix = list(infix[::-1] ) # reverse the infix equation
    for i in range(len(reversed_infix ) ):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ')' # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '(' # change ")" to "("
    return (infix_2_postfix(''.join(reversed_infix ) ))[
        ::-1
    ] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """) # Input an Infix equation
    Infix = ''.join(Infix.split()) # Remove spaces from the input
    print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 235 |
'''simple docstring'''
from __future__ import annotations
a__ : List[str] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph :
    '''simple docstring'''
    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search ( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex] # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path ( self , target_vertex : str ):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f"->{target_vertex}"
if __name__ == "__main__":
a__ : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
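# An iterative alternative to the recursive shortest_path above (a sketch, not
# part of the original class): walk the parent map from the target back to the
# source and reverse the collected path.
def iterative_shortest_path(g: Graph, target: str) -> str:
    path = []
    vertex = target
    while vertex is not None:
        path.append(vertex)
        vertex = g.parent.get(vertex)
    return '->'.join(reversed(path))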
| 51 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 300 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a__ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
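# A minimal sketch of the deferral idea behind the _LazyModule pattern used in
# the two import files above, built only on the standard library. This is an
# assumption-level illustration of the core mechanism (import on first
# attribute access), not the actual transformers helper.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr: str):
        # resolve which submodule exports the attribute, import it lazily
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'{self.__name__}.{submodule}')
                return getattr(module, attr)
        raise AttributeError(attr)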
| 51 | 0 |
"""simple docstring"""
def ugly_numbers ( n : int ) -> int:
    '''simple docstring'''
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
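# Quick sanity checks for the fixed ugly_numbers above: the sequence begins
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12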
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 51 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_UpperCAmelCase : int = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = """maskformer"""
UpperCAmelCase__ = {"""hidden_size""": """mask_feature_size"""}
UpperCAmelCase__ = ["""resnet""", """swin"""]
UpperCAmelCase__ = ["""detr"""]
def __init__( self : Tuple , UpperCAmelCase : int = 256 , UpperCAmelCase : int = 256 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 2_0.0 , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[Any] , ) -> Tuple:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowerCamelCase__ : Union[str, Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(a__ , a__ ):
lowerCamelCase__ : Optional[int] = backbone_config.pop('model_type' )
lowerCamelCase__ : int = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : List[Any] = config_class.from_dict(a__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowerCamelCase__ : Optional[int] = DetrConfig()
else:
# verify that the decoder is supported
lowerCamelCase__ : Optional[int] = (
decoder_config.pop('model_type' ) if isinstance(a__ , a__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(a__ , a__ ):
lowerCamelCase__ : List[str] = CONFIG_MAPPING[decoder_type]
lowerCamelCase__ : List[str] = config_class.from_dict(a__ )
lowerCamelCase__ : Optional[int] = backbone_config
lowerCamelCase__ : Any = decoder_config
# main feature dimension for the model
lowerCamelCase__ : List[str] = fpn_feature_size
lowerCamelCase__ : Any = mask_feature_size
# initializer
lowerCamelCase__ : Dict = init_std
lowerCamelCase__ : Optional[Any] = init_xavier_std
# Hungarian matcher && loss
lowerCamelCase__ : Tuple = cross_entropy_weight
lowerCamelCase__ : Optional[Any] = dice_weight
lowerCamelCase__ : Optional[int] = mask_weight
lowerCamelCase__ : Union[str, Any] = use_auxiliary_loss
lowerCamelCase__ : str = no_object_weight
lowerCamelCase__ : Dict = output_auxiliary_logits
lowerCamelCase__ : int = self.decoder_config.encoder_attention_heads
lowerCamelCase__ : Any = self.decoder_config.num_hidden_layers
super().__init__(**a__ )
@classmethod
def A_ ( cls : Tuple , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : PretrainedConfig , **UpperCAmelCase : List[str] ) -> Any:
return cls(
backbone_config=a__ , decoder_config=a__ , **a__ , )
def A_ ( self : List[Any] ) -> List[Any]:
lowerCamelCase__ : str = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Union[str, Any] = self.backbone_config.to_dict()
lowerCamelCase__ : List[Any] = self.decoder_config.to_dict()
lowerCamelCase__ : List[Any] = self.__class__.model_type
return output
| 295 |
'''simple docstring'''
from math import factorial
def solution ( num : int = 100 ) -> int:
    """simple docstring"""
    return sum(int(x ) for x in str(factorial(num ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
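# Sanity check for the fixed solution above: 10! == 3628800, whose digits sum
# to 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27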
| 51 | 0 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = model.config
lowerCAmelCase__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowerCAmelCase__ = MBartConfig(
is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=SCREAMING_SNAKE_CASE_ , add_final_layer_norm=SCREAMING_SNAKE_CASE_ , )
return encoder_config, decoder_config
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "encoder.model" in name:
lowerCAmelCase__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowerCAmelCase__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowerCAmelCase__ = """encoder.""" + name
if "attn.proj" in name:
lowerCAmelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowerCAmelCase__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowerCAmelCase__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowerCAmelCase__ = """encoder.layernorm.bias"""
return name
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
lowerCAmelCase__ = key.split(""".""" )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = int(key_split[5] )
lowerCAmelCase__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowerCAmelCase__ = val
return orig_state_dict
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase__ = DonutModel.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval()
# load HuggingFace model
lowerCAmelCase__ , lowerCAmelCase__ = get_configs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = DonutSwinModel(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = MBartForCausalLM(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = VisionEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = original_model.state_dict()
lowerCAmelCase__ = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify results on scanned document
lowerCAmelCase__ = load_dataset("""hf-internal-testing/example-documents""" )
lowerCAmelCase__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowerCAmelCase__ = XLMRobertaTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ , from_slow=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowerCAmelCase__ = DonutProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCAmelCase__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCAmelCase__ = """When is the coffee break?"""
lowerCAmelCase__ = task_prompt.replace("""{user_input}""" , SCREAMING_SNAKE_CASE_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCAmelCase__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCAmelCase__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowerCAmelCase__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCAmelCase__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCAmelCase__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowerCAmelCase__ = original_model.decoder.tokenizer(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )[
"""input_ids"""
]
lowerCAmelCase__ = original_model.encoder.model.patch_embed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ = model.encoder.embeddings(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
# verify encoder hidden states
lowerCAmelCase__ = original_model.encoder(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model.encoder(SCREAMING_SNAKE_CASE_ ).last_hidden_state
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
# verify decoder hidden states
lowerCAmelCase__ = original_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
__lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
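# A standalone sketch of the fused-qkv split performed in convert_state_dict
# above: timm-style checkpoints keep one (3*dim, dim) projection matrix, while
# the HF layout expects separate query/key/value tensors. The helper name is
# illustrative; it works on any tensor supporting .shape and slicing.
def split_fused_qkv(weight):
    dim = weight.shape[0] // 3
    return weight[:dim, :], weight[dim : dim * 2, :], weight[-dim:, :]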
| 644 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =StableUnCLIPPipeline
_lowerCamelCase =TEXT_TO_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCamelCase =False
def __snake_case ( self : str ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=a__ , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a__ , num_layers=1 , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=a__ )
UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self : str , a__ : Dict , a__ : List[str]=0 ):
if str(a__ ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(a__ )
else:
UpperCAmelCase = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self : List[Any] ):
UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=a__ )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase = pipe('''anime turle''' , generator=a__ , output_type='''np''' )
UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __snake_case ( self : str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 51 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( UpperCAmelCase_ ):
lowerCamelCase__ : List[Any] =["image_processor", "tokenizer"]
lowerCamelCase__ : Tuple ="CLIPImageProcessor"
lowerCamelCase__ : int =("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
__magic_name__ : List[str] = kwargs.pop('''feature_extractor''' )
__magic_name__ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ) -> str:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
            encoding = self.tokenizer(a__ , return_tensors=a__ , **a__ )
        if images is not None:
            image_features = self.image_processor(a__ , return_tensors=a__ , **a__ )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def lowercase ( self , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a__ , **a__ )
def lowercase ( self , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*a__ , **a__ )
@property
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : List[Any] = self.tokenizer.model_input_names
__magic_name__ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 154 |
'''simple docstring'''
def is_palindrome ( head ) -> bool:
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack ( head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict ( head ) -> bool:
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
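# The checks above assume singly linked nodes exposing .val and .next, but the
# snippet never defines a node type; a minimal node class and usage example
# (names are illustrative, not from the original file):
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt
def build_linked_list(values):
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head
assert is_palindrome(build_linked_list([1, 2, 2, 1]))
assert not is_palindrome(build_linked_list([1, 2, 3]))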
| 51 | 0 |
'''simple docstring'''
def nand_gate ( input_a : int , input_b : int ) ->int:
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate ( ) ->None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
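# NAND is functionally complete, so the other basic gates can be derived from
# the fixed nand_gate above; a short illustrative sketch.
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)
def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))
def or_gate(input_a: int, input_b: int) -> int:
    return nand_gate(not_gate(input_a), not_gate(input_b))
assert not_gate(0) == 1 and and_gate(1, 1) == 1 and or_gate(0, 1) == 1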
| 342 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["image_processor", "tokenizer"]
_lowerCamelCase ="CLIPImageProcessor"
_lowerCamelCase =("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , a__ : List[Any]=None , a__ : str=None , **a__ : Tuple ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__( self : Optional[Any] , a__ : Optional[int]=None , a__ : List[str]=None , a__ : int=None , **a__ : Tuple ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
            encoding = self.tokenizer(a__ , return_tensors=a__ , **a__ )
        if images is not None:
            image_features = self.image_processor(a__ , return_tensors=a__ , **a__ )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __snake_case ( self : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __snake_case ( self : int , *a__ : Optional[int] , **a__ : int ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __snake_case ( self : str ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __snake_case ( self : Optional[int] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , )
return self.image_processor_class
@property
def __snake_case ( self : List[Any] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , )
return self.image_processor
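# A hedged usage sketch for a processor like the one above, using the public
# CLIP checkpoint name purely as an illustration (nothing here is defined by
# this file). Not called automatically; requires network access to the hub.
def _clip_processor_demo():
    from PIL import Image
    from transformers import CLIPProcessor
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = Image.new('RGB', (224, 224))
    return processor(text=['a photo of a cat'], images=image, return_tensors='pt')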
| 51 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
snake_case = datasets.utils.logging.get_logger(__name__)
snake_case = ['names', 'prefix']
snake_case = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
snake_case = ['encoding_errors', 'on_bad_lines']
snake_case = ['date_format']
@dataclass
class SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ''','''
UpperCamelCase_ : str = None
UpperCamelCase_ : List[str] = '''infer'''
UpperCamelCase_ : List[Any] = None
UpperCamelCase_ : str = None
UpperCamelCase_ : Any = None
UpperCamelCase_ : List[Any] = None
UpperCamelCase_ : List[Any] = None
UpperCamelCase_ : List[Any] = True
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Any = False
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Dict = None
UpperCamelCase_ : Any = None
UpperCamelCase_ : List[Any] = True
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : Dict = True
UpperCamelCase_ : List[Any] = None
UpperCamelCase_ : List[str] = '''.'''
UpperCamelCase_ : Any = None
UpperCamelCase_ : List[str] = '''"'''
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : List[str] = None
UpperCamelCase_ : Optional[Any] = None
UpperCamelCase_ : List[str] = None
UpperCamelCase_ : Optional[Any] = None
UpperCamelCase_ : Optional[Any] = True
UpperCamelCase_ : int = True
UpperCamelCase_ : str = 0
UpperCamelCase_ : str = True
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : str = None
UpperCamelCase_ : List[Any] = 1_0_0_0_0
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : List[Any] = '''strict'''
UpperCamelCase_ : Any = '''error'''
UpperCamelCase_ : Tuple = None
def _A ( self : Any ):
if self.delimiter is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.column_names
@property
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = CsvConfig
def _A ( self : Optional[int] ):
return datasets.DatasetInfo(features=self.config.features )
def _A ( self : int , UpperCAmelCase_ : List[str] ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
SCREAMING_SNAKE_CASE : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
SCREAMING_SNAKE_CASE : Any = data_files
if isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : Dict = [files]
SCREAMING_SNAKE_CASE : Dict = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE : int = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : Any = [files]
SCREAMING_SNAKE_CASE : Dict = [dl_manager.iter_files(a__ ) for file in files]
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"files": files} ) )
return splits
def _A ( self : Any , UpperCAmelCase_ : pa.Table ):
if self.config.features is not None:
SCREAMING_SNAKE_CASE : Any = self.config.features.arrow_schema
if all(not require_storage_cast(a__ ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : str = table_cast(a__ , a__ )
return pa_table
def _A ( self : Any , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
SCREAMING_SNAKE_CASE : Dict = pd.read_csv(a__ , iterator=a__ , dtype=a__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a__ ):
SCREAMING_SNAKE_CASE : List[Any] = pa.Table.from_pandas(a__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a__ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(a__ )}: {e}''' )
raise
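# The builder above backs the packaged "csv" loader in datasets; a minimal
# usage sketch (the file name is illustrative). Extra keyword arguments such
# as sep are forwarded into CsvConfig and on to pandas.read_csv.
def _load_csv_demo():
    from datasets import load_dataset
    return load_dataset('csv', data_files={'train': 'train.csv'}, sep=',')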
| 62 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =XGLMTokenizer
_lowerCamelCase =XGLMTokenizerFast
_lowerCamelCase =True
_lowerCamelCase =True
def __snake_case ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __snake_case ( self : Tuple ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(a__ ) , 1008 )
def __snake_case ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase = XGLMTokenizer(a__ , keep_accents=a__ )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
UpperCAmelCase = XGLMTokenizer(f.name , keep_accents=a__ )
UpperCAmelCase = pickle.dumps(a__ )
pickle.loads(a__ )
def __snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(a__ )
UpperCAmelCase = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(a__ )
UpperCAmelCase = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __snake_case ( self : int ):
UpperCAmelCase = '''Hello World!'''
UpperCAmelCase = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __snake_case ( self : Any ):
# fmt: off
UpperCAmelCase = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name='''facebook/xglm-564M''' , padding=a__ , )
| 51 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowercase (UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase = 4_2
class __lowercase (UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCAmelCase__ = 1_6 , lowerCAmelCase__ = 8_8 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = "geglu" , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = attention_head_dim
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE_ : List[Any] = in_channels
SCREAMING_SNAKE_CASE_ : List[Any] = torch.nn.GroupNorm(num_groups=a__ , num_channels=a__ , eps=1E-6 , affine=a__ )
SCREAMING_SNAKE_CASE_ : List[str] = nn.Linear(a__ , a__ )
# 3. Define transformers blocks
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(
a__ , a__ , a__ , dropout=a__ , cross_attention_dim=a__ , activation_fn=a__ , attention_bias=a__ , double_self_attention=a__ , norm_elementwise_affine=a__ , )
for d in range(a__ )
] )
SCREAMING_SNAKE_CASE_ : Tuple = nn.Linear(a__ , a__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=1 , lowerCAmelCase__=None , lowerCAmelCase__ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = hidden_states.shape
SCREAMING_SNAKE_CASE_ : Tuple = batch_frames // num_frames
SCREAMING_SNAKE_CASE_ : str = hidden_states
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states[None, :].reshape(a__ , a__ , a__ , a__ , a__ )
SCREAMING_SNAKE_CASE_ : str = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.norm(a__ )
SCREAMING_SNAKE_CASE_ : int = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , a__ , a__ )
SCREAMING_SNAKE_CASE_ : str = self.proj_in(a__ )
# 2. Blocks
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE_ : Optional[Any] = block(
a__ , encoder_hidden_states=a__ , timestep=a__ , cross_attention_kwargs=a__ , class_labels=a__ , )
# 3. Output
SCREAMING_SNAKE_CASE_ : Tuple = self.proj_out(a__ )
SCREAMING_SNAKE_CASE_ : Any = (
hidden_states[None, None, :]
.reshape(a__ , a__ , a__ , a__ , a__ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_states.reshape(a__ , a__ , a__ , a__ )
SCREAMING_SNAKE_CASE_ : List[str] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a__ )
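# A plain-torch sketch of the reshape performed in the forward pass above:
# the frame axis is folded out of the batch so that attention runs over frames
# at each spatial location. The function name is illustrative.
import torch
def fold_frames_for_temporal_attention(x: torch.Tensor, num_frames: int) -> torch.Tensor:
    batch_frames, channels, height, width = x.shape
    batch = batch_frames // num_frames
    x = x.reshape(batch, num_frames, channels, height, width)
    x = x.permute(0, 3, 4, 1, 2)  # (batch, height, width, frames, channels)
    return x.reshape(batch * height * width, num_frames, channels)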
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> YolosConfig:
"""simple docstring"""
UpperCAmelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
UpperCAmelCase = [800, 1_333]
UpperCAmelCase = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = 330
UpperCAmelCase = 14
UpperCAmelCase = 6
UpperCAmelCase = 1_320
elif "yolos_s" in yolos_name:
UpperCAmelCase = 384
UpperCAmelCase = 1_536
UpperCAmelCase = 12
UpperCAmelCase = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase = [800, 1_344]
UpperCAmelCase = 91
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''coco-detection-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosConfig , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
UpperCAmelCase = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
UpperCAmelCase = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
UpperCAmelCase = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
UpperCAmelCase = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
UpperCAmelCase = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : YolosForObjectDetection ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[2] )
UpperCAmelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[
dim : dim * 2, :
]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
else:
UpperCAmelCase = val
return orig_state_dict
def __snake_case ( ) -> torch.Tensor:
"""simple docstring"""
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __snake_case ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> str:
"""simple docstring"""
UpperCAmelCase = get_yolos_config(SCREAMING_SNAKE_CASE_ )
# load original state_dict
UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# load 🤗 model
UpperCAmelCase = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCAmelCase = 800 if yolos_name != '''yolos_ti''' else 512
UpperCAmelCase = YolosImageProcessor(format='''coco_detection''' , size=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = outputs.logits, outputs.pred_boxes
UpperCAmelCase, UpperCAmelCase = None, None
if yolos_name == "yolos_ti":
UpperCAmelCase = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCAmelCase = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCAmelCase = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCAmelCase = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCAmelCase = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCAmelCase = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCAmelCase = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
UpperCAmelCase = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='''hustvl''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
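
# Example invocation (added for illustration; the script filename and checkpoint path
# are assumptions, not taken from this file):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small \
#       --push_to_hub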
| 51 | 0 |
'''simple docstring'''

from math import isqrt


def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first difference of consecutive cubes
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
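
# Added note (not in the original solution): the candidates generated above are exactly the
# differences of consecutive cubes, (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1, which is why
# successive candidates differ by a growing multiple of 6.
assert [(k + 1) ** 3 - k**3 for k in range(1, 5)] == [7, 19, 37, 61]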
| 399 |
'''simple docstring'''

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
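
# Minimal usage sketch (added for illustration; assumes this module is importable as part
# of the `transformers` package):
#
#   from transformers import WhisperConfig
#
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   # `attribute_map` aliases generic config names onto Whisper-specific ones:
#   assert config.hidden_size == config.d_model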
| 51 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a__ =4_2
class Encoder(nn.Module):
    '''simple docstring'''

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
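
# Shape note (added for clarity): with `double_z=True` the final convolution emits
# 2 * out_channels feature maps, i.e. the concatenated mean and log-variance that
# `DiagonalGaussianDistribution` below splits with `torch.chunk(parameters, 2, dim=1)`.
# A hypothetical configuration with two downsampling stages:
#
#   enc = Encoder(block_out_channels=(64, 128, 256), down_block_types=("DownEncoderBlock2D",) * 3)
#   moments = enc(torch.randn(1, 3, 64, 64))  # -> (1, 6, 16, 16): mean + logvar channels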
class Decoder(nn.Module):
    '''simple docstring'''

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    '''simple docstring'''

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
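
# Usage sketch (added for illustration; the sizes are arbitrary):
#
#   quantizer = VectorQuantizer(n_e=512, vq_embed_dim=64, beta=0.25)
#   z = torch.randn(1, 64, 8, 8)               # encoder output; channels == vq_embed_dim
#   z_q, loss, (_, _, indices) = quantizer(z)  # z_q has the same shape as z
#
# The straight-through assignment `z_q = z + (z_q - z).detach()` above lets gradients
# flow to the encoder as if quantization were the identity, while the codebook itself
# is trained through the embedding/commitment loss.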
class DiagonalGaussianDistribution(object):
    '''simple docstring'''

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
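
# Reparameterization sketch (added for illustration): `sample()` draws
# x = mean + std * eps with eps ~ N(0, I), and `kl()` with no argument reduces to the
# closed form 0.5 * sum(mean**2 + var - 1 - logvar) against a standard normal prior.
#
#   posterior = DiagonalGaussianDistribution(torch.randn(1, 8, 16, 16))  # 4 mean + 4 logvar channels
#   latents = posterior.sample()                                         # shape (1, 4, 16, 16)
#   kl = posterior.kl()                                                  # shape (1,)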
| 506 |
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    '''simple docstring'''

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
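
# Example of the masks this helper builds (added for illustration; values are arbitrary):
#
#   config = LEDConfig(pad_token_id=1)
#   input_ids = tf.constant([[0, 5, 7, 2, 1, 1]])
#   batch = prepare_led_inputs_dict(config, input_ids, decoder_input_ids=input_ids)
#   # batch["attention_mask"] is 0 at the two trailing pad positions and 1 elsewhere;
#   # batch["decoder_attention_mask"] always attends to position 0 (the start token).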
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
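
# To run these checks locally (added note; the test path follows the usual transformers
# repository layout and may differ in your checkout):
#
#   pytest tests/models/led/test_modeling_tf_led.py -k "attention or decoder_model_past"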
| 51 | 0 |