| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''''''
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( _snake_case ):
return data[1:] + data[0]
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = ''''''
for i in range(len(_snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = int("""0b""" + data[0] + data[-1] ,2 )
SCREAMING_SNAKE_CASE__ : int = int("""0b""" + data[1:3] ,2 )
return bin(s[row][col] )[2:]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = message[:4]
SCREAMING_SNAKE_CASE__ : Tuple = message[4:]
SCREAMING_SNAKE_CASE__ : Tuple = apply_table(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = xor(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = apply_sbox(_snake_case ,temp[:4] ) # noqa: E741
SCREAMING_SNAKE_CASE__ : int = apply_sbox(_snake_case ,temp[4:] )
SCREAMING_SNAKE_CASE__ : Dict = '''0''' * (2 - len(_snake_case )) + l # noqa: E741
SCREAMING_SNAKE_CASE__ : Tuple = '''0''' * (2 - len(_snake_case )) + r
SCREAMING_SNAKE_CASE__ : Union[str, Any] = apply_table(l + r ,_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = xor(_snake_case ,_snake_case )
return temp + right
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input('Enter 10 bit key: ')
UpperCAmelCase__ : List[str] = input('Enter 8 bit message: ')
UpperCAmelCase__ : Dict = [6, 3, 7, 4, 8, 5, 1_0, 9]
UpperCAmelCase__ : str = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
UpperCAmelCase__ : Any = [2, 4, 3, 1]
UpperCAmelCase__ : Union[str, Any] = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCAmelCase__ : List[str] = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCAmelCase__ : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCAmelCase__ : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCAmelCase__ : Optional[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCAmelCase__ : int = apply_table(key, paa_table)
UpperCAmelCase__ : str = temp[:5]
UpperCAmelCase__ : Tuple = temp[5:]
UpperCAmelCase__ : List[str] = left_shift(left)
UpperCAmelCase__ : Optional[Any] = left_shift(right)
UpperCAmelCase__ : Dict = apply_table(left + right, pa_table)
UpperCAmelCase__ : Tuple = left_shift(left)
UpperCAmelCase__ : Union[str, Any] = left_shift(right)
UpperCAmelCase__ : Any = left_shift(left)
UpperCAmelCase__ : int = left_shift(right)
UpperCAmelCase__ : Dict = apply_table(left + right, pa_table)
# encryption
UpperCAmelCase__ : Tuple = apply_table(message, IP)
UpperCAmelCase__ : List[Any] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase__ : List[Any] = temp[4:] + temp[:4]
UpperCAmelCase__ : Any = function(expansion, sa, sa, keya, temp)
UpperCAmelCase__ : Any = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
UpperCAmelCase__ : Optional[Any] = apply_table(CT, IP)
UpperCAmelCase__ : Any = function(expansion, sa, sa, keya, temp)
UpperCAmelCase__ : List[str] = temp[4:] + temp[:4]
UpperCAmelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase__ : Tuple = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
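# Worked examples of the helper functions above (illustrative values chosen here,
# not vectors from the source; each can be checked by hand):
#   apply_table("10100", [2, 4, 3, 1])  ->  "0011"   (picks 1-based positions 2, 4, 3, 1)
#   left_shift("10100")                 ->  "01001"
#   xor("1010", "0110")                 ->  "1100"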
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_UpperCamelCase : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class snake_case__ ( datasets.BuilderConfig):
a_ = None
a_ = "utf-8"
a_ = None
a_ = None
a_ = True # deprecated
a_ = None # deprecated
a_ = 10 << 20 # 10MB
a_ = None
class snake_case__ ( datasets.ArrowBasedBuilder):
a_ = JsonConfig
def A ( self : Union[str, Any] ) -> Optional[int]:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
UpperCAmelCase_ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def A ( self : Optional[int] , _A : List[str] ) -> int:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
UpperCAmelCase_ : List[Any] = data_files
if isinstance(_A , _A ):
UpperCAmelCase_ : List[str] = [files]
UpperCAmelCase_ : List[str] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
UpperCAmelCase_ : Tuple = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
UpperCAmelCase_ : Optional[Any] = [files]
UpperCAmelCase_ : List[Any] = [dl_manager.iter_files(_A ) for file in files]
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def A ( self : List[Any] , _A : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCAmelCase_ : Union[str, Any] = self.config.features.arrow_schema.field(_A ).type
UpperCAmelCase_ : int = pa_table.append_column(_A , pa.array([None] * len(_A ) , type=_A ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ : Dict = table_cast(_A , self.config.features.arrow_schema )
return pa_table
def A ( self : Optional[Any] , _A : List[Any] ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : Optional[int] = json.load(_A )
# We keep only the field we are interested in
UpperCAmelCase_ : Any = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_A , (list, tuple) ):
UpperCAmelCase_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : str = {col: [row.get(_A ) for row in dataset] for col in keys}
else:
UpperCAmelCase_ : Tuple = dataset
UpperCAmelCase_ : List[str] = pa.Table.from_pydict(_A )
yield file_idx, self._cast_table(_A )
# If the file has one json object per line
else:
with open(_A , '''rb''' ) as f:
UpperCAmelCase_ : Tuple = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ : Optional[Any] = max(self.config.chunksize // 32 , 16 << 10 )
UpperCAmelCase_ : Tuple = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
UpperCAmelCase_ : Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_A )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ : List[Any] = batch.decode(self.config.encoding , errors=_A ).encode('''utf-8''' )
try:
while True:
try:
UpperCAmelCase_ : int = paj.read_json(
io.BytesIO(_A ) , read_options=paj.ReadOptions(block_size=_A ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_A , pa.ArrowInvalid )
and "straddling" not in str(_A )
or block_size > len(_A )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(_A )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_A , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCAmelCase_ : List[Any] = json.load(_A )
except json.JSONDecodeError:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_A , _A ): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ : Optional[int] = set().union(*[row.keys() for row in dataset] )
UpperCAmelCase_ : Any = {col: [row.get(_A ) for row in dataset] for col in keys}
UpperCAmelCase_ : Optional[int] = pa.Table.from_pydict(_A )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(_A )
break
else:
logger.error(F"Failed to read file '{file}' with error {type(_A )}: {e}" )
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_A )
batch_idx += 1
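# Usage sketch (not part of the builder above; the file names are placeholders):
# this builder backs the "json" loading script in `datasets`, so it is normally
# reached through `load_dataset` rather than instantiated directly.
if __name__ == "__main__":
    from datasets import load_dataset

    # JSON Lines input: one object per line, read in `chunksize` batches by the builder.
    ds_lines = load_dataset("json", data_files="my_records.jsonl")

    # A single JSON document whose records live under a top-level key: pass `field`.
    ds_field = load_dataset("json", data_files="my_records.json", field="data")

    print(ds_lines, ds_field)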
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=224 , lowercase__=30 , lowercase__=400 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=[0.5, 0.5, 0.5] , lowercase__=[0.5, 0.5, 0.5] , ) -> Any:
SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : int = min_resolution
SCREAMING_SNAKE_CASE : Optional[Any] = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : List[str] = size
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : int = image_mean
SCREAMING_SNAKE_CASE : str = image_std
def _UpperCamelCase ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Dict = ViTImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : List[str] = EfficientFormerImageProcessorTester(self )
@property
def _UpperCamelCase ( self ) -> int:
return self.image_proc_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'size' ) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
pass
def _UpperCamelCase ( self ) -> List[str]:
# Initialize image_processor
SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def _UpperCamelCase ( self ) -> int:
# Initialize image_processor
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def _UpperCamelCase ( self ) -> List[Any]:
# Initialize image_processor
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Tuple = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
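# Usage sketch (not part of the test class above): the processor under test is the
# stock ViTImageProcessor; the 18x18 size mirrors the tester defaults and the input
# image is synthetic.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor(size={"height": 18, "width": 18})
    image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
    pixel_values = processor(image, return_tensors="np").pixel_values
    print(pixel_values.shape)  # (1, 3, 18, 18)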
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : int = CLIPTokenizer
snake_case__ : Union[str, Any] = CLIPTokenizerFast
snake_case__ : str = True
snake_case__ : Optional[int] = {}
snake_case__ : int = False
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE : int = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE : List[str] = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase__ ) )
def _UpperCamelCase ( self , **lowercase__ ) -> Dict:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def _UpperCamelCase ( self , **lowercase__ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : Dict = 'lower newer'
return input_text, output_text
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : str = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[int] = 'lower newer'
SCREAMING_SNAKE_CASE : List[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
@require_ftfy
def _UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer_s.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE : Tuple = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_s.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE : Tuple = tokenizer_s.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE : int = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_s.tokenize(lowercase__ )
SCREAMING_SNAKE_CASE : int = tokenizer_r.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self ) -> int:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{text_of_1_token} {text_of_1_token}"""
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
SCREAMING_SNAKE_CASE : Optional[int] = F""" {text}"""
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
def _UpperCamelCase ( self ) -> int:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowercase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def _UpperCamelCase ( self ) -> Union[str, Any]:
super().test_tokenization_python_rust_equals()
def _UpperCamelCase ( self ) -> int:
# CLIP always lower cases letters
pass
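# Usage sketch (not part of the test class above): the tokenizer exercised by these
# tests is the standard CLIP tokenizer, shown here against the public
# openai/clip-vit-base-patch32 checkpoint.
if __name__ == "__main__":
    from transformers import CLIPTokenizer

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    encoding = tokenizer("a photo of a cat")
    print(encoding.input_ids)  # ids bracketed by <|startoftext|> and <|endoftext|>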
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__: Union[str, Any] = logging.get_logger(__name__)
A__: List[str] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A__: str = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
A__: int = {
'''camembert-base''': 512,
}
A__: Any = '''▁'''
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Tuple="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE :str="</s>" , SCREAMING_SNAKE_CASE :List[str]="<s>" , SCREAMING_SNAKE_CASE :Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE :List[str]="<pad>" , SCREAMING_SNAKE_CASE :List[str]="<mask>" , SCREAMING_SNAKE_CASE :Tuple=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE :Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE :Any , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_a : Dict =AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
_a : str ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_a : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) )
_a : str =vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_a : Union[str, Any] ={"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_a : Union[str, Any] =len(self.fairseq_tokens_to_ids )
_a : Dict =len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_a : str ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Tuple =[self.cls_token_id]
_a : Optional[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None , SCREAMING_SNAKE_CASE :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[int] , SCREAMING_SNAKE_CASE :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_a : str =[self.sep_token_id]
_a : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __UpperCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
_a : int ={self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str ) -> Optional[Any]:
'''simple docstring'''
_a : List[Any] =[]
_a : Tuple =""""""
_a : str =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token
_a : List[Any] =True
_a : List[str] =[]
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE )
_a : List[Any] =False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE )
return out_string.strip()
def __getstate__( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : Union[str, Any] =self.__dict__.copy()
_a : int =None
return state
def __setstate__( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Optional[int] ={}
_a : int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : List[str] =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : Any =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
"""Subset-sum via dynamic programming: decide whether some subset of `arr` sums to `required_sum`."""


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] is True when some subset of the first i elements sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
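# Further examples beyond the doctests (easy to verify by hand): 4 + 5 = 9 exists
# in the first call, while no subset of the second call can reach 30.
#   is_sum_subset([3, 34, 4, 12, 5, 2], 9)   ->  True
#   is_sum_subset([3, 34, 4, 12, 5, 2], 30)  ->  False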
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 255 , _lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False ):
if not batched:
lowerCAmelCase_ = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
lowerCAmelCase_ ,lowerCAmelCase_ = image.size
else:
lowerCAmelCase_ ,lowerCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * h / w )
lowerCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = self.size['''shortest_edge''']
else:
lowerCAmelCase_ = []
for image in image_inputs:
lowerCAmelCase_ ,lowerCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase_ = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
lowerCAmelCase_ = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCAmelCase ( __a , unittest.TestCase ):
__A : Tuple = DetaImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = DetaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _lowerCamelCase )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
lowerCAmelCase_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ ,lowerCAmelCase_ = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DetaImageProcessor()
lowerCAmelCase_ = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DetaImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowerCamelCase ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowerCamelCase )
lowerCAmelCase_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowerCamelCase ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowerCamelCase ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowerCamelCase ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowerCamelCase )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowerCamelCase ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowerCamelCase ) )
"""Utilities for converting a PyTorch state dict into a Flax parameter tree."""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging

logger = logging.get_logger(__name__)


def rename_key(key):
    """Collapse PyTorch-style "name.0" segments into Flax-style "name_0"."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight name to its Flax counterpart and reshape the tensor if necessary."""
    # conv norm or layer norm: PyTorch "bias"/"weight"/"gamma" map to Flax "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: (out, in, kh, kw) -> (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
SCREAMING_SNAKE_CASE_ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
SCREAMING_SNAKE_CASE_ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
SCREAMING_SNAKE_CASE_ = 'zero2'
SCREAMING_SNAKE_CASE_ = 'zero3'
SCREAMING_SNAKE_CASE_ = [ZEROa, ZEROa]
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = parameterized.to_safe_name('''_'''.join(str(_lowercase ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
SCREAMING_SNAKE_CASE_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> str:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_) -> Dict:
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1_0 , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Union[str, Any]:
UpperCamelCase = models[model]
UpperCamelCase = self.run_trainer(
stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
self.do_checks(lowerCamelCase_)
return output_dir
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1_0 , lowerCamelCase_ = 1 , lowerCamelCase_ = True , lowerCamelCase_ = True , ) -> Dict:
UpperCamelCase = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowerCamelCase_)
UpperCamelCase = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase_)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCamelCase = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
UpperCamelCase = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
UpperCamelCase = self.get_launcher(lowerCamelCase_)
UpperCamelCase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
return output_dir
def UpperCAmelCase__ ( self , lowerCamelCase_=False) -> List[Any]:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
UpperCamelCase = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result
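# Usage sketch (not part of the pipeline class above): this class is what
# `pipeline("zero-shot-image-classification")` instantiates. The checkpoint and
# image URL are the usual public examples; the candidate labels are arbitrary.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    result = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of two cats", "a photo of a dog"],
    )
    print(result)  # [{"score": ..., "label": ...}, ...] sorted by descending score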
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : torch.FloatTensor
lowerCamelCase : Optional[torch.FloatTensor] =None
class __UpperCamelCase ( a__ , a__ ):
lowerCamelCase : Tuple =2
@register_to_config
def __init__( self , lowerCAmelCase__ = 0.02 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 1.007 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 0.05 , lowerCAmelCase__ = 50 , ) -> Union[str, Any]:
# standard deviation of the initial noise distribution
a : Tuple = sigma_max
# setable values
a : int = None
a : np.IntTensor = None
a : torch.FloatTensor = None # sigma(t_i)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> torch.FloatTensor:
return sample
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[str]:
a : List[Any] = num_inference_steps
a : List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
a : int = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
a : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
a : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa , device=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
a : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
a : Dict = 0
# sample eps ~ N(0, S_noise^2 * I)
a : Union[str, Any] = self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase__ ).to(sample.device )
a : Any = sigma + gamma * sigma
a : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Union[KarrasVeOutput, Tuple]:
a : Union[str, Any] = sample_hat + sigma_hat * model_output
a : Tuple = (sample_hat - pred_original_sample) / sigma_hat
a : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase__ , derivative=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def __a ( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
pred_original_sample = sample_prev + sigma_prev * model_output
derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
raise NotImplementedError()
| 709 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
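# 2x nearest-neighbor upsampling followed by a 3x3 convolution (Flax).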
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , hidden_states ) -> Optional[Any]:
batch, height, width, channels = hidden_states.shape
hidden_states = jax.image.resize(
hidden_states , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
hidden_states = self.conv(hidden_states )
return hidden_states
class __UpperCamelCase ( nn.Module ):
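# Strided 3x3 convolution that halves the spatial resolution (Flax).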
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , hidden_states ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
hidden_states = self.conv(hidden_states )
return hidden_states
class __UpperCamelCase ( nn.Module ):
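# Flax residual block: two GroupNorm + swish + conv stages with the projected timestep embedding added in between, plus an optional 1x1 shortcut convolution.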
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
| 31 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case_ : Dict = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case_ ( __A ):
'''simple docstring'''
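# Exercises RagRetriever against three index backends: the canonical HF index, a custom HF index (in memory and loaded from disk), and the legacy DPR index.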
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = 8
# DPR tok
lowerCamelCase_ : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase_ : Optional[int] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase_ : List[str] = os.path.join(__magic_name__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowerCamelCase_ : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCamelCase_ : str = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCamelCase_ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCamelCase_ : Any = {"unk_token": "<unk>"}
lowerCamelCase_ : Dict = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase_ : Tuple = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ : int = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
lowerCamelCase_ : Optional[Any] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
lowerCamelCase_ : str = self.get_dummy_dataset()
lowerCamelCase_ : Dict = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowerCamelCase_ : Tuple = dataset
lowerCamelCase_ : Optional[int] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : bool ) -> List[Any]:
lowerCamelCase_ : Optional[int] = self.get_dummy_dataset()
lowerCamelCase_ : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
lowerCamelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , "dataset" )
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
lowerCamelCase_ : Optional[int] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCamelCase_ : List[Any] = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __magic_name__ ) , )
return retriever
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : str = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ : List[str] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
lowerCamelCase_ : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
lowerCamelCase_ : Optional[int] = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(__magic_name__ , open(__magic_name__ , "wb" ) )
lowerCamelCase_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
lowerCamelCase_ : Any = RagRetriever(
__magic_name__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : int = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
lowerCamelCase_ : Dict = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
lowerCamelCase_ : Dict = self.get_dummy_dataset()
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : List[Any] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : Optional[int] = 1
lowerCamelCase_ : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
lowerCamelCase_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[int] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
lowerCamelCase_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : List[Any] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : int = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
lowerCamelCase_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
lowerCamelCase_ : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : str = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : List[Any] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
lowerCamelCase_ : Union[str, Any] = 1
lowerCamelCase_ : Dict = self.get_dummy_legacy_index_retriever()
lowerCamelCase_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[str] = retriever.retrieve(__magic_name__ , n_docs=__magic_name__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__magic_name__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , __magic_name__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
lowerCamelCase_ : List[str] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__magic_name__ )
lowerCamelCase_ : Optional[int] = RagRetriever.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
lowerCamelCase_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : Optional[Any] = retriever.retrieve(__magic_name__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
import torch
lowerCamelCase_ : Dict = 1
lowerCamelCase_ : List[str] = self.get_dummy_canonical_hf_index_retriever()
lowerCamelCase_ : str = [[5, 7], [10, 11]]
lowerCamelCase_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : Dict = retriever(__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertIsInstance(__magic_name__ , np.ndarray )
lowerCamelCase_ : Dict = retriever(
__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ , return_tensors="pt" , )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__magic_name__ , torch.Tensor )
self.assertIsInstance(__magic_name__ , torch.Tensor )
self.assertIsInstance(__magic_name__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
lowerCamelCase_ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=__magic_name__ )
retriever.set_ctx_encoder_tokenizer(__magic_name__ )
lowerCamelCase_ : List[Any] = [[5, 7], [10, 11]]
lowerCamelCase_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCamelCase_ : int = retriever(__magic_name__ , __magic_name__ , prefix=retriever.config.generator.prefix , n_docs=__magic_name__ )
self.assertEqual(
len(__magic_name__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , __magic_name__ ) # check for doc token related keys in dictionary.
| 488 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : int = False, False, False
@dataclass
class lowerCAmelCase :
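# Audio feature type: examples are stored as {"bytes", "path"} structs and decoded on access into {"path", "array", "sampling_rate"} dicts.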
UpperCAmelCase__ = None
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = None
# Automatically constructed
UpperCAmelCase__ = "dict"
UpperCAmelCase__ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
UpperCAmelCase__ = field(default="""Audio""", init=__UpperCamelCase, repr=__UpperCamelCase )
def __call__( self : Optional[Any] ) -> Dict:
return self.pa_type
def A_ ( self : List[str] , UpperCAmelCase : Union[str, bytes, dict] ) -> dict:
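# Encode a path, raw bytes, or an {"array", "sampling_rate"} dict into the {"bytes", "path"} storage format (arrays and PCM data are serialized to WAV).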
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(UpperCAmelCase , UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCamelCase__ : List[str] = BytesIO()
sf.write(UpperCAmelCase , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCamelCase__ : Optional[int] = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
lowerCamelCase__ : Union[str, Any] = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32767
lowerCamelCase__ : str = BytesIO(bytes() )
sf.write(UpperCAmelCase , UpperCAmelCase , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def A_ ( self : int , UpperCAmelCase : dict , UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
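# Decode stored bytes or a path into a float waveform, optionally downmixing to mono and resampling to the configured sampling rate.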
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
path , file = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
lowerCamelCase__ : Optional[Any] = token_per_repo_id or {}
lowerCamelCase__ : Dict = path.split('::' )[-1]
try:
lowerCamelCase__ : int = string_to_dict(UpperCAmelCase , config.HUB_DATASETS_URL )['repo_id']
lowerCamelCase__ : Dict = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCamelCase__ : Tuple = None
with xopen(UpperCAmelCase , 'rb' , use_auth_token=UpperCAmelCase ) as f:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = sf.read(UpperCAmelCase )
else:
lowerCamelCase__ , lowerCamelCase__ : List[str] = sf.read(UpperCAmelCase )
lowerCamelCase__ : Any = array.T
if self.mono:
lowerCamelCase__ : Tuple = librosa.to_mono(UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCamelCase__ : int = librosa.resample(UpperCAmelCase , orig_sr=UpperCAmelCase , target_sr=self.sampling_rate )
lowerCamelCase__ : List[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def A_ ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def A_ ( self : Union[str, Any] , UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
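# Cast plain string, binary, or struct Arrow storage into the canonical {"bytes", "path"} struct type.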
if pa.types.is_string(storage.type ):
lowerCamelCase__ : Tuple = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
lowerCamelCase__ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase__ : Union[str, Any] = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
lowerCamelCase__ : Tuple = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
lowerCamelCase__ : Any = pa.array([Audio().encode_example(UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
lowerCamelCase__ : int = storage.field('bytes' )
else:
lowerCamelCase__ : Dict = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
lowerCamelCase__ : Union[str, Any] = storage.field('path' )
else:
lowerCamelCase__ : Optional[int] = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
lowerCamelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(UpperCAmelCase , self.pa_type )
def A_ ( self : Dict , UpperCAmelCase : pa.StructArray ) -> pa.StructArray:
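# Read the referenced files and embed their bytes into the Arrow storage, keeping only each file's basename as the path.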
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase : int ):
with xopen(UpperCAmelCase , 'rb' ) as f:
lowerCamelCase__ : Any = f.read()
return bytes_
lowerCamelCase__ : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase__ : List[str] = pa.array(
[os.path.basename(UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
lowerCamelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase , self.pa_type )
| 188 |
_UpperCAmelCase : str = """Tobias Carryer"""
from time import time
class lowerCAmelCase :
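# Linear congruential generator: each call computes (multiplier * seed + increment) % modulo and stores the result as the new seed.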
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : str=int(time() ) ) -> Optional[int]: # noqa: B008
lowerCamelCase__ : List[Any] = multiplier
lowerCamelCase__ : Tuple = increment
lowerCamelCase__ : int = modulo
lowerCamelCase__ : Any = seed
def A_ ( self : Optional[int] ) -> List[Any]:
lowerCamelCase__ : List[str] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
_UpperCAmelCase : List[Any] = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 188 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase_ ( __a="" ) -> str:
a__ : Dict = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
a__ : int = torch.rand(12 , dtype=torch.floataa ) - 0.5
a__ : Tuple = AgentAudio(lowerCamelCase__ )
a__ : Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
a__, a__ : int = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , torch.tensor(lowerCamelCase__ ) , atol=1E-4 ) )
def _UpperCamelCase( self : int ):
a__ : Union[str, Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
a__ : Any = get_new_path(suffix=".wav" )
sf.write(lowerCamelCase__ , lowerCamelCase__ , 16_000 )
a__ : int = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCamelCase__ )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Union[str, Any] = torch.randint(0 , 256 , (64, 64, 3) )
a__ : Optional[int] = AgentImage(lowerCamelCase__ )
a__ : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
a__ : Any = Image.open(lowerCamelCase__ )
a__ : Tuple = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def _UpperCamelCase( self : str ):
a__ : Any = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
a__ : Any = Image.open(lowerCamelCase__ )
a__ : Tuple = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Tuple = "Hey!"
a__ : Tuple = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , agent_type.to_string() )
self.assertEqual(lowerCamelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 37 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase_ ( __a ) -> Union[str, Any]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ):
super().__init__()
a__ : int = module
a__ : Any = nn.Sequential(
nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , )
a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
_lowercase = 'bigscience/bloom-1b7'
# Constant values
_lowercase = 2.1_09_65_95_52_69_25_74
_lowercase = 'Hello my name is'
_lowercase = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
_lowercase = 1_0
def _UpperCamelCase( self : Dict ):
# Models and tokenizer
a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Union[str, Any] ):
super().setUp()
# Models and tokenizer
a__ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) )
a__ : Optional[Any] = config.to_dict()
a__ : int = config.to_diff_dict()
a__ : List[str] = config.to_json_string()
def _UpperCamelCase( self : int ):
from bitsandbytes.nn import Paramsabit
a__ : List[Any] = self.model_fpaa.get_memory_footprint()
a__ : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
a__ : Optional[Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCamelCase( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase__ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCamelCase( self : str ):
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : List[Any] ):
a__ : Optional[Any] = BitsAndBytesConfig()
a__ : Optional[int] = True
a__ : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" )
a__ : str = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : int = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
def _UpperCamelCase( self : Dict ):
with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
a__ : int = BitsAndBytesConfig()
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def _UpperCamelCase( self : int ):
with self.assertRaises(lowerCamelCase__ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCamelCase__ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
a__ : int = self.tokenizer(self.input_text , return_tensors="pt" )
a__ : Any = self.model_fpaa.to(torch.floataa )
a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
a__ : Tuple = self.model_fpaa.half()
# Check this does not throw an error
a__ : Dict = self.model_fpaa.float()
def _UpperCamelCase( self : Dict ):
a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCamelCase( cls : str ):
a__ : Dict = "t5-small"
a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
a__ : int = AutoTokenizer.from_pretrained(cls.model_name )
a__ : str = "Translate in German: Hello, my dog is cute"
def _UpperCamelCase( self : Optional[int] ):
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Optional[int] ):
from transformers import TaForConditionalGeneration
a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules
a__ : Optional[Any] = None
# test with `t5-small`
a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : Dict = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Any = model.generate(**lowerCamelCase__ )
a__ : Union[str, Any] = modules
def _UpperCamelCase( self : List[Any] ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : int = model.generate(**lowerCamelCase__ )
# test with `flan-t5-small`
a__ : int = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
a__ : Optional[int] = model.generate(**lowerCamelCase__ )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : List[str] ):
super().setUp()
# model_name
a__ : Union[str, Any] = "bigscience/bloom-560m"
a__ : Union[str, Any] = "t5-small"
# Different types of model
a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Sequence classification model
a__ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# CausalLM model
a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
# Seq2seq model
a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" )
def _UpperCamelCase( self : List[Any] ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Union[str, Any] ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
super().setUp()
def _UpperCamelCase( self : int ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : Tuple ):
a__ : int = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
a__ : Tuple = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Tuple ):
super().setUp()
def _UpperCamelCase( self : List[Any] ):
a__ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS )
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : Any = "facebook/opt-350m"
super().setUp()
def _UpperCamelCase( self : int ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a__ : Any = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a__ : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase__ ) ):
a__ : Dict = LoRALayer(module.q_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.k_proj , rank=16 )
a__ : List[Any] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a__ : Optional[Any] = model.forward(**lowerCamelCase__ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase__ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 'gpt2-xl'
_lowercase = 3.31_91_85_48_54_15_21_87
| 37 | 1 |
'''simple docstring'''
from __future__ import annotations
lowercase_ = tuple[int, int, int]
lowercase_ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowercase_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
lowercase_ = "EGZWVONAHDCLFQMSIPJBYUKXTR"
lowercase_ = "FOBHMDKEXQNRAULPGSJVTYICZW"
lowercase_ = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
lowercase_ = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
lowercase_ = "RMDJXFUWGISLHVTCQNKYPBEZOA"
lowercase_ = "SGLCPQWZHKXAREONTFBVIYJUDM"
lowercase_ = "HVSICLTYKQUBXDWAJZOMFGPREN"
lowercase_ = "RZWQHFMVDBKICJLNTUXAGYPSOE"
lowercase_ = "LFKIJODBEGAMQPXVUHYSTCZRWN"
lowercase_ = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if (unique_rotsel := len(set(__A))) < 3:
_a = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(__A)
# Checks if rotor positions are valid
_a , _a , _a = rotpos
if not 0 < rotorposa <= len(__A):
_a = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(__A)
if not 0 < rotorposa <= len(__A):
_a = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(__A)
if not 0 < rotorposa <= len(__A):
_a = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(__A)
# Validates string and returns dict
_a = _plugboard(__A)
return rotpos, rotsel, pbdict
def lowerCAmelCase (__A):
"""simple docstring"""
if not isinstance(__A , __A):
_a = F'''Plugboard setting isn\'t type string ({type(__A)})'''
raise TypeError(__A)
elif len(__A) % 2 != 0:
_a = F'''Odd number of symbols ({len(__A)})'''
raise Exception(__A)
elif pbstring == "":
return {}
pbstring = pbstring.replace(''' ''' , '''''')
# Checks if all characters are unique
_a = set()
for i in pbstring:
if i not in abc:
_a = F'''\'{i}\' not in list of symbols'''
raise Exception(__A)
elif i in tmppbl:
_a = F'''Duplicate symbol ({i})'''
raise Exception(__A)
else:
tmppbl.add(__A)
del tmppbl
# Created the dictionary
_a = {}
for j in range(0 , len(__A) - 1 , 2):
_a = pbstring[j + 1]
_a = pbstring[j]
return pb
def lowerCAmelCase (__A , __A , __A = (rotora, rotora, rotora) , __A = "" , ):
"""simple docstring"""
_a = text.upper()
_a , _a , _a = _validator(
__A , __A , plugb.upper())
_a , _a , _a = rotor_position
_a , _a , _a = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_a = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_a = plugboard[symbol]
# rotor ra --------------------------
_a = abc.index(__A) + rotorposa
_a = rotora[index % len(__A)]
# rotor rb --------------------------
_a = abc.index(__A) + rotorposa
_a = rotora[index % len(__A)]
# rotor rc --------------------------
_a = abc.index(__A) + rotorposa
_a = rotora[index % len(__A)]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_a = reflector[symbol]
# 2nd rotors
_a = abc[rotora.index(__A) - rotorposa]
_a = abc[rotora.index(__A) - rotorposa]
_a = abc[rotora.index(__A) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_a = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(__A):
_a = 0
rotorposa += 1
if rotorposa >= len(__A):
_a = 0
rotorposa += 1
if rotorposa >= len(__A):
_a = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(__A)
return "".join(__A)
if __name__ == "__main__":
lowercase_ = "This is my Python script that emulates the Enigma machine from WWII."
lowercase_ = (1, 1, 1)
lowercase_ = "pictures"
lowercase_ = (rotora, rotora, rotora)
lowercase_ = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 704 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase_ = 256
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['melgan']
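# Turns note tokens into audio: note/continuous encoders condition a spectrogram decoder that denoises mel-spectrogram chunks, and MelGAN converts the result into a waveform.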
def __init__(self , A , A , A , A , A , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
_a = math.log(1E-5 ) # Matches MelGAN training.
_a = 4.0 # Largest value for most examples
_a = 128
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def a__ (self , A , A=(-1.0, 1.0) , A=False ) -> List[str]:
"""simple docstring"""
_a , _a = output_range
if clip:
_a = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
_a = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def a__ (self , A , A=(-1.0, 1.0) , A=False ) -> Optional[Any]:
"""simple docstring"""
_a , _a = input_range
_a = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
_a = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def a__ (self , A , A , A ) -> Optional[int]:
"""simple docstring"""
_a = input_tokens > 0
_a , _a = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
_a , _a = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def a__ (self , A , A , A ) -> Any:
"""simple docstring"""
_a = noise_time
if not torch.is_tensor(A ):
_a = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
_a = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_a = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_a = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__(self , A , A = None , A = 100 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
_a = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_a = np.zeros([1, 0, self.n_dims] , np.floataa )
_a = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
_a = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_a = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_a = ones
_a = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
_a = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_a = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_a = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_a = self.scheduler.step(A , A , A , generator=A ).prev_sample
_a = self.scale_to_features(A , input_range=[-1.0, 1.0] )
_a = mel[:1]
_a = mel.cpu().float().numpy()
_a = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
_a = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_a = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A )
| 352 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
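# Configuration for a ResNet backbone: stage widths and depths, block type (basic/bottleneck), and which stages to expose as backbone features.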
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
| 38 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _A , unittest.TestCase ):
lowercase = None
lowercase = BloomTokenizerFast
lowercase = BloomTokenizerFast
lowercase = True
lowercase = False
lowercase = 'tokenizer_file'
lowercase = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def __a ( self : str ) -> List[str]:
'''simple docstring'''
super().setUp()
lowercase = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Union[str, Any] , **__lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __a ( self : List[Any] ) -> str:
'''simple docstring'''
lowercase = self.get_rust_tokenizer()
lowercase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
lowercase = tokenizer.batch_encode_plus(__lowerCamelCase )['''input_ids''']
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowercase = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __a ( self : List[Any] , __lowerCamelCase : List[Any]=6 ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase = '''This is a simple input'''
lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase = ('''This is a simple input''', '''This is a pair''')
lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.batch_encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.encode(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.batch_encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase = None # Hotfixing padding = None
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , )
def __a ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_rust_tokenizer()
lowercase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=__lowerCamelCase )
lowercase = next(iter(__lowerCamelCase ) )['''premise'''] # pick up one data
lowercase = list(sample_data.values() )
lowercase = list(map(tokenizer.encode , __lowerCamelCase ) )
lowercase = [tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) for x in output_tokens]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __a ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 604 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _a :
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 703 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : str = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : List[Any] = LEDConfig
UpperCamelCase_ : Tuple = {}
UpperCamelCase_ : Dict = '''gelu'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Union[str, Any]=37 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=20 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Tuple=4 , ):
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : str = eos_token_id
SCREAMING_SNAKE_CASE : int = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE : Dict = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
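        # Worked example with the defaults above (comment added for clarity): seq_length=7 and
        # attention_window=4 give key_length = 4 + 2 = 6 and a padded
        # encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 8.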
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE : int = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE : Tuple = prepare_led_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = tf.concat(
[tf.zeros_like(UpperCAmelCase_ )[:, :-1], tf.ones_like(UpperCAmelCase_ )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE : int = global_attention_mask
return config, inputs_dict
def _A ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : str = TFLEDModel(config=UpperCAmelCase_ ).get_decoder()
SCREAMING_SNAKE_CASE : str = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE : List[str] = input_ids[:1, :]
SCREAMING_SNAKE_CASE : Any = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE : List[str] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-3 )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Tuple = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase_ : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase_ : int = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : int = False
UpperCamelCase_ : int = False
UpperCamelCase_ : Union[str, Any] = False
def _A ( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=UpperCAmelCase_ )
def _A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def _A ( self : int ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[Any] = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE : Tuple = 2
SCREAMING_SNAKE_CASE : List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Dict = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Dict = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_decoder_attentions_output(UpperCAmelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _A ( self : List[Any] ):
pass
def _A ( self : Optional[Any] ):
# TODO: Head-masking not yet implement
pass
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.intaa )
snake_case = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE : List[str] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Dict = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : int = (1, 1024, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE : Any = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Any = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 , rtol=1E-3 )
| 62 |
"""simple docstring"""
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] , x0 : float , x1 : float ) -> float:
    # secant method: keep the two most recent iterates and follow the secant line between them
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        # x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n))
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x : float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
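    # Illustrative check (comment added; not part of the original script): the root returned by
    # intersection(f, 3, 3.5) should make f(root) very small, since the loop above only stops
    # once successive iterates differ by less than 1e-5.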
| 227 | 0 |
'''simple docstring'''
import random
def random_graph( nodes_number : int , probability : float , directed : bool = False ) -> dict:
    graph: dict = {i: [] for i in range(nodes_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes i < j, add an edge from i to j
    # if the randomly generated number is smaller than probability
    for i in range(nodes_number ):
        for j in range(i + 1 , nodes_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph( nodes_number : int ) -> dict:
    return {
        i: [j for j in range(nodes_number ) if i != j] for i in range(nodes_number )
    }
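# Illustrative output (comment added for clarity): with 4 nodes and probability 0.5 the random
# generator returns an adjacency-list dict such as {0: [1, 3], 1: [2], 2: [], 3: []}, while
# complete_graph(3) always returns {0: [1, 2], 1: [0, 2], 2: [0, 1]}.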
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 466 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors : list[numpy.ndarray] , steps : int ) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate( vector : numpy.ndarray , angle_in_degrees : float ) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors : list[numpy.ndarray] ) -> None:
    axes = plt.gca()
    axes.set_aspect("""equal""" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
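    # Each iteration replaces every segment by four shorter ones, so five iterations turn the
    # initial triangle's 3 segments into 3 * 4**5 = 3072 segments (comment added for clarity).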
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 466 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ["input_values", "padding_mask"]
def __init__( self : Dict , lowercase__ : int = 1 , lowercase__ : int = 2_4_0_0_0 , lowercase__ : float = 0.0 , lowercase__ : float = None , lowercase__ : float = None , **lowercase__ : List[Any] , ):
super().__init__(feature_size=lowercase__ , sampling_rate=lowercase__ , padding_value=lowercase__ , **lowercase__ )
__lowercase : List[Any] = chunk_length_s
__lowercase : Union[str, Any] = overlap
@property
def snake_case ( self : Dict ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case ( self : str ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
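    # Worked example (comment added for clarity): with sampling_rate=24_000, chunk_length_s=1.0
    # and overlap=0.5, chunk_length is int(1.0 * 24_000) = 24_000 samples and chunk_stride is
    # max(1, int(0.5 * 24_000)) = 12_000 samples.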
def __call__( self : str , lowercase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase__ : Optional[Union[bool, str, PaddingStrategy]] = None , lowercase__ : Optional[bool] = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[int] = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
__lowercase : List[str] = True
__lowercase : str = bool(
isinstance(lowercase__ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__lowercase : Union[str, Any] = [np.asarray(lowercase__ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(lowercase__ , np.ndarray ):
__lowercase : str = np.asarray(lowercase__ , dtype=np.floataa )
elif isinstance(lowercase__ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__lowercase : Tuple = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase : List[Any] = [np.asarray(lowercase__ ).T]
# verify inputs are valid
for idx, example in enumerate(lowercase__ ):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' )
__lowercase : Optional[int] = None
__lowercase : List[str] = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__lowercase : List[str] = min(array.shape[0] for array in raw_audio )
__lowercase : Optional[Any] = int(np.floor(max_length / self.chunk_stride ) )
__lowercase : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__lowercase : Optional[Any] = max(array.shape[0] for array in raw_audio )
__lowercase : int = int(np.ceil(max_length / self.chunk_stride ) )
__lowercase : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
__lowercase : List[str] = "max_length"
else:
__lowercase : List[str] = input_values
# normal padding on batch
if padded_inputs is None:
__lowercase : List[str] = self.pad(
lowercase__ , max_length=lowercase__ , truncation=lowercase__ , padding=lowercase__ , return_attention_mask=lowercase__ , )
if padding:
__lowercase : int = padded_inputs.pop("attention_mask" )
__lowercase : Any = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
__lowercase : Optional[int] = example[..., None]
input_values.append(example.T )
__lowercase : List[str] = input_values
if return_tensors is not None:
__lowercase : str = padded_inputs.convert_to_tensors(lowercase__ )
return padded_inputs
| 575 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575 | 1 |
import argparse
import os
import re
import packaging.version
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''examples/'''
__SCREAMING_SNAKE_CASE : Tuple = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__SCREAMING_SNAKE_CASE : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
__SCREAMING_SNAKE_CASE : List[str] = '''README.md'''
def snake_case_ ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : List[Any] ):
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase =f.read()
_lowerCAmelCase , _lowerCAmelCase =REPLACE_PATTERNS[pattern]
_lowerCAmelCase =replace.replace("""VERSION""" , lowercase__ )
_lowerCAmelCase =re_pattern.sub(lowercase__ , lowercase__ )
with open(lowercase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(lowercase__ )
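# Illustrative effect of REPLACE_PATTERNS above (comment added for clarity): with version "1.2.3",
# the "init" rule rewrites a line like __version__ = "1.2.2.dev0" to __version__ = "1.2.3", and the
# "examples" rule rewrites check_min_version("1.2.2.dev0") to check_min_version("1.2.3").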
def snake_case_ ( lowercase__ : Union[str, Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(lowercase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern="""examples""" )
def snake_case_ ( lowercase__ : Dict , lowercase__ : str=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase__ , lowercase__ , lowercase__ )
if not patch:
update_version_in_examples(lowercase__ )
def snake_case_ ( ):
'''simple docstring'''
_lowerCAmelCase ="""🤗 Transformers currently provides the following architectures"""
_lowerCAmelCase ="""1. Want to contribute a new model?"""
with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase =f.readlines()
# Find the start of the list.
_lowerCAmelCase =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCAmelCase =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_lowerCAmelCase =lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(lowercase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowercase__ )
def snake_case_ ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_lowerCAmelCase =f.read()
_lowerCAmelCase =REPLACE_PATTERNS["""init"""][0].search(lowercase__ ).groups()[0]
return packaging.version.parse(lowercase__ )
def snake_case_ ( lowercase__ : Any=False ):
'''simple docstring'''
_lowerCAmelCase =get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_lowerCAmelCase =default_version.base_version
elif patch:
_lowerCAmelCase =f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
_lowerCAmelCase =f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
_lowerCAmelCase =input(f"Which version are you releasing? [{default_version}]" )
if len(lowercase__ ) == 0:
_lowerCAmelCase =default_version
print(f"Updating version to {version}." )
global_version_update(lowercase__ , patch=lowercase__ )
def snake_case_ ( ):
'''simple docstring'''
_lowerCAmelCase =get_version()
_lowerCAmelCase =f"{current_version.major}.{current_version.minor + 1}.0.dev0"
_lowerCAmelCase =current_version.base_version
# Check with the user we got that right.
_lowerCAmelCase =input(f"Which version are we developing now? [{dev_version}]" )
if len(lowercase__ ) == 0:
_lowerCAmelCase =dev_version
print(f"Updating version to {version}." )
global_version_update(lowercase__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 149 |
from maths.prime_factors import prime_factors
def snake_case_ ( number : int ):
    '''simple docstring'''
    if not isinstance(number , int ):
        _lowerCAmelCase =f"Input value of [number={number}] must be an integer"
        raise TypeError(_lowerCAmelCase )
    if number < 1:
        raise ValueError("""Input must be a positive integer""" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
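# Illustrative values (comment added; assumes maths.prime_factors returns factors with
# multiplicity): prime_factors(12) == [2, 2, 3] has an odd length, so the function returns -1,
# while prime_factors(6) == [2, 3] has an even length and yields 1.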
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1 # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
| 46 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowercase__ ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = load_tool("text-to-speech" )
self.tool.setup()
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = self.tool("hey" )
UpperCAmelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = self.tool("hey" )
UpperCAmelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 475 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[Any] = "switch_transformers"
__lowerCamelCase : str = ["past_key_values"]
__lowerCamelCase : Any = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , _lowerCAmelCase=32128 , _lowerCAmelCase=768 , _lowerCAmelCase=64 , _lowerCAmelCase=2048 , _lowerCAmelCase=64 , _lowerCAmelCase=12 , _lowerCAmelCase=3 , _lowerCAmelCase=12 , _lowerCAmelCase=3 , _lowerCAmelCase=12 , _lowerCAmelCase=8 , _lowerCAmelCase=False , _lowerCAmelCase=0.01 , _lowerCAmelCase="float32" , _lowerCAmelCase=False , _lowerCAmelCase=32 , _lowerCAmelCase=128 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=0.001 , _lowerCAmelCase=0.001 , _lowerCAmelCase=1.0 , _lowerCAmelCase="relu" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ) -> Optional[int]:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = d_model
_lowerCAmelCase = d_kv
_lowerCAmelCase = d_ff
_lowerCAmelCase = num_sparse_encoder_layers
_lowerCAmelCase = num_layers
_lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
_lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
_lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
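        # Worked example (comment added for clarity): num_layers=12 with num_sparse_encoder_layers=3
        # gives an encoder sparse step of 12 // 3 = 4, i.e. every fourth encoder layer becomes a
        # sparse (mixture-of-experts) layer.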
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
_lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
_lowerCAmelCase = num_heads
_lowerCAmelCase = num_experts
_lowerCAmelCase = expert_capacity
_lowerCAmelCase = router_bias
_lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
_lowerCAmelCase = router_dtype
_lowerCAmelCase = router_ignore_padding_tokens
_lowerCAmelCase = relative_attention_num_buckets
_lowerCAmelCase = relative_attention_max_distance
_lowerCAmelCase = dropout_rate
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_factor
_lowerCAmelCase = feed_forward_proj
_lowerCAmelCase = use_cache
_lowerCAmelCase = add_router_probs
_lowerCAmelCase = router_z_loss_coef
_lowerCAmelCase = router_aux_loss_coef
_lowerCAmelCase = self.feed_forward_proj.split("-" )
_lowerCAmelCase = act_info[-1]
_lowerCAmelCase = act_info[0] == "gated"
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCAmelCase = "gelu_new"
super().__init__(
pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase , )
| 489 |
'''simple docstring'''
def check_bouncy(num : int ):
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(num )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution(percent : float = 99 ):
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
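# Illustrative values (comment added for clarity): 409 is bouncy because its digits first fall
# (4 -> 0) and then rise (0 -> 9), while 134468 (non-decreasing digits) and 66420
# (non-increasing digits) are not bouncy.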
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 489 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
SCREAMING_SNAKE_CASE_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( __SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = "left"
def __init__( self , a_ , a_=False , a_=True , a_=False , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<sep>" , a_="<pad>" , a_="<cls>" , a_="<mask>" , a_=["<eop>", "<eod>"] , a_ = None , **a_ , ):
# Mask token behave like a normal word, i.e. include the space before it
a_ : int = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
a_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
a_ : Optional[int] = 3
a_ : str = do_lower_case
a_ : int = remove_space
a_ : Dict = keep_accents
a_ : Tuple = vocab_file
a_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def snake_case_ ( self ):
return len(self.sp_model )
def snake_case_ ( self ):
a_ : Dict = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
a_ : List[Any] = self.__dict__.copy()
a_ : Optional[int] = None
return state
def __setstate__( self , a_ ):
a_ : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a_ : str = {}
a_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , a_ ):
if self.remove_space:
a_ : Optional[int] = " ".join(inputs.strip().split() )
else:
a_ : List[str] = inputs
a_ : Optional[Any] = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
if not self.keep_accents:
a_ : Dict = unicodedata.normalize("NFKD" , snake_case_ )
a_ : int = "".join([c for c in outputs if not unicodedata.combining(snake_case_ )] )
if self.do_lower_case:
a_ : Union[str, Any] = outputs.lower()
return outputs
def snake_case_ ( self , a_ ):
a_ : int = self.preprocess_text(snake_case_ )
a_ : Optional[int] = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
a_ : str = []
for piece in pieces:
if len(snake_case_ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
a_ : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case_ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a_ : int = cur_pieces[1:]
else:
a_ : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case_ )
else:
new_pieces.append(snake_case_ )
return new_pieces
def snake_case_ ( self , a_ ):
return self.sp_model.PieceToId(snake_case_ )
def snake_case_ ( self , a_ ):
return self.sp_model.IdToPiece(snake_case_ )
def snake_case_ ( self , a_ ):
a_ : List[str] = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def snake_case_ ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ):
a_ : int = kwargs.pop("use_source_tokenizer" , snake_case_ )
a_ : Union[str, Any] = self.convert_ids_to_tokens(snake_case_ , skip_special_tokens=snake_case_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a_ : List[Any] = []
a_ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case_ ) )
a_ : List[str] = []
sub_texts.append(snake_case_ )
else:
current_sub_text.append(snake_case_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a_ : Tuple = "".join(snake_case_ )
a_ : Union[str, Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a_ : int = self.clean_up_tokenization(snake_case_ )
return clean_text
else:
return text
def snake_case_ ( self , a_ , a_ = None ):
a_ : Optional[int] = [self.sep_token_id]
a_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case_ ( self , a_ , a_ = None , a_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is not None:
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1, 1]
return ([0] * len(snake_case_ )) + [1, 1]
def snake_case_ ( self , a_ , a_ = None ):
a_ : Union[str, Any] = [self.sep_token_id]
a_ : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case_ ( self , a_ , a_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a_ : List[Any] = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
a_ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,) | 237 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_UpperCAmelCase : Dict = parse(importlib.metadata.version('''torch'''))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation =STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version =parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def UpperCamelCase ( operation : str , version : str ):
    '''simple docstring'''
    return compare_versions(_UpperCAmelCase , operation , version )
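# Illustrative usage (comment added; not part of the original): compare_versions("packaging", ">=", "20.0")
# checks the installed packaging release, assuming STR_OPERATION_TO_FUNC maps operator strings
# such as ">=" to their comparison functions.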
| 72 | 0 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = ''''''
for i in table:
res += inp[i - 1]
return res
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
return data[1:] + data[0]
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Dict:
"""simple docstring"""
snake_case_ = ''''''
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[str]:
"""simple docstring"""
snake_case_ = int('''0b''' + data[0] + data[-1] , 2 )
snake_case_ = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> str:
"""simple docstring"""
snake_case_ = message[:4]
snake_case_ = message[4:]
snake_case_ = apply_table(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ = apply_sbox(SCREAMING_SNAKE_CASE , temp[:4] ) # noqa: E741
snake_case_ = apply_sbox(SCREAMING_SNAKE_CASE , temp[4:] )
snake_case_ = '''0''' * (2 - len(SCREAMING_SNAKE_CASE )) + l # noqa: E741
snake_case_ = '''0''' * (2 - len(SCREAMING_SNAKE_CASE )) + r
snake_case_ = apply_table(l + r , SCREAMING_SNAKE_CASE )
snake_case_ = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return temp + right
if __name__ == "__main__":
UpperCAmelCase = input("""Enter 10 bit key: """)
UpperCAmelCase = input("""Enter 8 bit message: """)
UpperCAmelCase = [6, 3, 7, 4, 8, 5, 10, 9]
UpperCAmelCase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
UpperCAmelCase = [2, 4, 3, 1]
UpperCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCAmelCase = apply_table(key, paa_table)
UpperCAmelCase = temp[:5]
UpperCAmelCase = temp[5:]
UpperCAmelCase = left_shift(left)
UpperCAmelCase = left_shift(right)
UpperCAmelCase = apply_table(left + right, pa_table)
UpperCAmelCase = left_shift(left)
UpperCAmelCase = left_shift(right)
UpperCAmelCase = left_shift(left)
UpperCAmelCase = left_shift(right)
UpperCAmelCase = apply_table(left + right, pa_table)
# encryption
UpperCAmelCase = apply_table(message, IP)
UpperCAmelCase = function(expansion, sa, sa, keya, temp)
UpperCAmelCase = temp[4:] + temp[:4]
UpperCAmelCase = function(expansion, sa, sa, keya, temp)
UpperCAmelCase = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
UpperCAmelCase = apply_table(CT, IP)
UpperCAmelCase = function(expansion, sa, sa, keya, temp)
UpperCAmelCase = temp[4:] + temp[:4]
UpperCAmelCase = function(expansion, sa, sa, keya, temp)
UpperCAmelCase = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT) | 531 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 2
def UpperCamelCase__ ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.type_sequence_label_size
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ = False
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
snake_case_ = problem_type['''title''']
snake_case_ = problem_type['''num_labels''']
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
snake_case_ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
snake_case_ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
snake_case_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
snake_case_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase ) | 531 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case (unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )

        input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids

        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
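# Descriptive note on the integration check above: the score approximates the sequence log-likelihood
# as -(target_length * mean per-token loss), and the reference value of -84.9127 is matched with an
# absolute tolerance of 1e-4.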
| 71 |
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
A_ = frozenset(["image"])
A_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image"])
A_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image", "mask_image"])
A_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["example_image", "image", "mask_image"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
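# Hedged reading of the frozensets above: they appear to mirror the shared pipeline parameter sets used by
# diffusers-style pipeline tests (text-to-image, image-variation, inpainting, unconditional, audio and
# token-based pipelines), i.e. the call arguments each pipeline is expected to accept and, for the smaller
# companion sets, the arguments that are batched in batch tests.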
| 143 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCamelCase ( PretrainedConfig ):
_lowerCamelCase :int = "bert"
def __init__( self : Optional[int] , UpperCamelCase : Dict=3_05_22 , UpperCamelCase : List[str]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : Union[str, Any]=12 , UpperCamelCase : Any=30_72 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Any=5_12 , UpperCamelCase : List[Any]=2 , UpperCamelCase : Any=0.02 , UpperCamelCase : Any=1E-1_2 , UpperCamelCase : Tuple=0 , UpperCamelCase : List[Any]="absolute" , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=None , **UpperCamelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : str = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : Union[str, Any] = type_vocab_size
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : Tuple = layer_norm_eps
lowerCAmelCase__ : Union[str, Any] = position_embedding_type
lowerCAmelCase__ : Any = use_cache
lowerCAmelCase__ : Dict = classifier_dropout
class _lowerCamelCase ( OnnxConfig ):
@property
def _lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase__ : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase__ : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
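# A minimal sketch of how the dynamic axes above are consumed (the upstream names BertConfig and
# BertOnnxConfig are assumed for the two classes in this file): the ONNX exporter reads
# `onnx_config.inputs` to decide which axes ("batch", "sequence", and "choice" for the
# multiple-choice task) remain dynamic in the exported graph.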
| 717 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = """▁"""
_A = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_A = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_A = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_A = {
"""ernie-m-base""": 5_1_4,
"""ernie-m-large""": 5_1_4,
}
_A = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class _lowerCamelCase ( PreTrainedTokenizer ):
_lowerCamelCase :List[str] = ["input_ids"]
_lowerCamelCase :Any = VOCAB_FILES_NAMES
_lowerCamelCase :List[Any] = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase :List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase :List[Any] = RESOURCE_FILES_NAMES
def __init__( self : Tuple , UpperCamelCase : int , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=False , UpperCamelCase : int="utf8" , UpperCamelCase : List[Any]="[UNK]" , UpperCamelCase : int="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : str="[CLS]" , UpperCamelCase : Dict="[MASK]" , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Dict , ) -> None:
"""simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
lowerCAmelCase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , vocab_file=UpperCamelCase , encoding=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
lowerCAmelCase__ : Any = do_lower_case
lowerCAmelCase__ : Optional[Any] = sentencepiece_model_ckpt
lowerCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase__ : Optional[int] = self.load_vocab(filepath=UpperCamelCase )
else:
lowerCAmelCase__ : Tuple = {self.sp_model.id_to_piece(UpperCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase__ : List[str] = {v: k for k, v in self.vocab.items()}
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Any:
"""simple docstring"""
if text is None:
return None
lowerCAmelCase__ : Optional[Any] = self.tokenize(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = """""", []
for i, ch in enumerate(UpperCamelCase ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase__ : Union[str, Any] = self.SP_CHAR_MAPPING.get(UpperCamelCase )
else:
lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFKC""" , UpperCamelCase )
if self.is_whitespace(UpperCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase__ : List[Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase__ : List[str] = token[1:]
lowerCAmelCase__ : Dict = text[offset:].index(UpperCamelCase ) + offset
lowerCAmelCase__ : List[Any] = start + len(UpperCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase__ : Optional[int] = end
return token_mapping
@property
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.vocab )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.__dict__.copy()
lowerCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] , UpperCamelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : str ) -> str:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase , UpperCamelCase ) for c in text) )
def _lowerCAmelCase ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=64 , UpperCamelCase : List[Any]=0.1 ) -> Any:
"""simple docstring"""
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
lowerCAmelCase__ : Union[str, Any] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
lowerCAmelCase__ : Union[str, Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.EncodeAsPieces(UpperCamelCase )
else:
lowerCAmelCase__ : List[str] = self.sp_model.SampleEncodeAsPieces(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = []
for pi, piece in enumerate(UpperCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase ) and pi != 0:
new_pieces.append(UpperCamelCase )
continue
else:
continue
lowerCAmelCase__ : List[Any] = 0
for i, chunk in enumerate(UpperCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase ) or self.is_punct(UpperCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase__ : Dict = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase__ : Any = i
if len(UpperCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : str = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.convert_ids_to_tokens(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
return self.vocab.get(UpperCamelCase , self.vocab.get(self.unk_token ) )
def _lowerCAmelCase ( self : str , UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return self.reverse_vocab.get(UpperCamelCase , self.unk_token )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Tuple=None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Optional[Any] = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any]=None ) -> Any:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict=None , UpperCamelCase : Any=False ) -> List[Any]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase ) + 1) + [1] * (len(UpperCamelCase ) + 3)
def _lowerCAmelCase ( self : Any , UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowerCAmelCase ( self : int , UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase ) == 1:
lowerCAmelCase__ : List[Any] = unicodedata.category(UpperCamelCase )
if cat == "Zs":
return True
return False
def _lowerCAmelCase ( self : str , UpperCamelCase : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = {}
with io.open(UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCamelCase ):
lowerCAmelCase__ : Any = line.rstrip("""\n""" )
lowerCAmelCase__ : Optional[Any] = int(UpperCamelCase )
return token_to_idx
def _lowerCAmelCase ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = 0
if os.path.isdir(UpperCamelCase ):
lowerCAmelCase__ : List[str] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
lowerCAmelCase__ : List[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
lowerCAmelCase__ : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """sentencepiece.bpe.model""" )
with open(UpperCamelCase , """wb""" ) as fi:
lowerCAmelCase__ : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (vocab_file,)
| 507 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
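# A minimal usage note (import path taken from the mapping above): with the lazy module installed as this
# package's module object, an import such as
#   from transformers.models.deberta import DebertaModel
# only loads modeling_deberta when the attribute is first accessed, keeping `import transformers` cheap.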
| 29 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (PerceiverImageProcessor ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 0 |
def solution(n: int = 100 ):
    collect_powers = set()
    max_limit = n + 1  # maximum limit (a and b both run from 2 to n inclusive)
    for a in range(2 , max_limit ):
        for b in range(2 , max_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
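# For the default limit n = 100 the function counts the distinct values of a**b with 2 <= a, b <= 100;
# the expected answer to Project Euler problem 29 is 9183.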
| 209 |
from __future__ import annotations
SCREAMING_SNAKE_CASE = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """simple docstring"""

    def __init__( self , graph , source_vertex ):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex ):
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )

        return self.shortest_path(target_vertex_parent ) + f'''->{target_vertex}'''
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
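# Expected behaviour of the demo above: with source vertex "G" the breadth first search sets
# parent[C]=G, parent[A]=C, parent[F]=C, parent[B]=A, parent[E]=A, parent[D]=B, so shortest_path('D')
# prints "G->C->A->B->D", shortest_path('G') prints "G", and shortest_path('Foo') raises ValueError
# because "Foo" is unreachable from the source.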
| 209 | 1 |
class TrieNode:
    def __init__( self ):
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many( self , words ):
        '''simple docstring'''
        for word in words:
            self.insert(word )

    def insert( self , word ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find( self , word ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete( self , word ):
        '''simple docstring'''

        def _delete(curr , word , index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr

        _delete(self , word , 0 )
def print_words( node , word ) -> None:
    if node.is_leaf:
        print(word , end=" " )
    for key, value in node.nodes.items():
        print_words(value , word + key )


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("banana" )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    assert root.find("apple" )
    assert root.find("all" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True


def print_results( msg , passes ) -> None:
    print(str(msg ) , "works!" if passes else "doesn't work :(" )


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
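# Complexity note for the trie above: insert, find and delete each follow at most len(word) child links,
# so they run in O(len(word)) time, while memory grows with the total number of stored characters.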
| 629 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCamelCase : str = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A__ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
A__ : Tuple = 256_047
A__ : Dict = 256_145
@require_sentencepiece
@require_tokenizers
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A = NllbTokenizer
_A = NllbTokenizerFast
_A = True
_A = True
_A = {}
def lowerCAmelCase__ ( self )-> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Union[str, Any] = NllbTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Dict = NllbTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
UpperCAmelCase__ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
UpperCAmelCase__ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Tuple = tokenizer_r.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Tuple = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ )
UpperCAmelCase__ : List[Any] = tokenizer_p.save_pretrained(lowerCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : int = tokenizer_p.from_pretrained(lowerCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) )
shutil.rmtree(lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self )-> List[str]:
if not self.test_seqaseq:
return
UpperCAmelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
UpperCAmelCase__ : Tuple = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
UpperCAmelCase__ : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
UpperCAmelCase__ : str = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase__ , tgt_texts=lowerCamelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
lowerCamelCase__ , tgt_texts=lowerCamelCase__ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , lowerCamelCase__ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def lowerCAmelCase__ ( self )-> Tuple:
pass
def lowerCAmelCase__ ( self )-> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : str = [AddedToken("<special>" , lstrip=lowerCamelCase__ )]
UpperCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : str = tokenizer_r.encode("Hey this is a <special> token" )
UpperCAmelCase__ : str = tokenizer_r.encode("<special>" , add_special_tokens=lowerCamelCase__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ )
UpperCAmelCase__ : Tuple = tokenizer_p.encode("Hey this is a <special> token" )
UpperCAmelCase__ : Dict = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_A = "facebook/nllb-200-distilled-600M"
_A = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
_A = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
_A = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def lowerCAmelCase__ ( cls )-> Union[str, Any]:
UpperCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
UpperCAmelCase__ : str = 1
return cls
def lowerCAmelCase__ ( self )-> Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> List[str]:
self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase__ : List[Any] = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
UpperCAmelCase__ : Optional[Any] = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
UpperCAmelCase__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[int] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowerCamelCase__ )
UpperCAmelCase__ : Tuple = 10
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowerCAmelCase__ ( self )-> Dict:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def lowerCAmelCase__ ( self )-> int:
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ : int = NllbTokenizer.from_pretrained(lowerCamelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCAmelCase__ : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCAmelCase__ : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors="pt" )
UpperCAmelCase__ : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10 , return_tensors="pt" )
UpperCAmelCase__ : List[Any] = targets["input_ids"]
UpperCAmelCase__ : Tuple = shift_tokens_right(
lowerCamelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Optional[Any] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , {
# A, test, EOS, en_XX
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def lowerCAmelCase__ ( self )-> Dict:
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Dict = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
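# Descriptive note on the last two blocks above: in the upstream version of this test the toggled flag is
# the tokenizer's legacy_behaviour switch, which controls whether the language code is appended after EOS
# (first assertion, suffix form) or prepended before the tokens (second assertion, prefix form).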
| 718 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A__ : Tuple = logging.get_logger(__name__)
def a__ ( lowerCAmelCase : nn.ModuleList , lowerCAmelCase : nn.ModuleList , lowerCAmelCase : List[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F"{len(lowerCAmelCase )} != {len(lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A__ : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A__ : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a__ ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
'''simple docstring'''
try:
UpperCAmelCase__ : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCAmelCase ) )
def a__ ( lowerCAmelCase : int , lowerCAmelCase : Tuple ):
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a__ ( lowerCAmelCase : Union[str, PreTrainedModel] , lowerCAmelCase : Union[str, Path] = "student" , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : Union[int, None] = None , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
        UpperCAmelCase__ : List[str] = AutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F"teacher must be a model or string got type {type(lowerCAmelCase )}"
UpperCAmelCase__ : int = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Optional[Any] = teacher_e
if d is None:
UpperCAmelCase__ : Optional[Any] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCAmelCase )
    UpperCAmelCase__ : List[str] = AutoModelForSeq2SeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
UpperCAmelCase__ : Optional[int] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : int = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
UpperCAmelCase__ : int = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 660 | 0 |
from collections import deque
class lowerCAmelCase__ :
def __init__( self : str , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ) -> int:
A = process_name # process name
A = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A = arrival_time
A = burst_time # remaining burst time
A = 0 # total time of the process wait in ready queue
A = 0 # time from arrival time to completion time
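# Multi-level feedback queue scheduler: all but the last queue are served round-robin
# with their configured time slices; processes that do not finish drop down until the
# final queue, which is served first-come-first-served.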
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , ) -> Any:
A = number_of_queues
# time slice of queues that round robin algorithm applied
A = time_slices
# unfinished process is in this ready_queue
A = queue
# current time
A = current_time
# finished process is in this sequence queue
A = deque()
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
A = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Any ) -> List[Any]:
A = []
for i in range(len(A_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __UpperCamelCase ( self : int , __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
A = []
for i in range(len(A_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : str ) -> str:
A = []
for i in range(len(A_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : Union[str, Any] ) -> Tuple:
return [q.burst_time for q in queue]
def __UpperCamelCase ( self : Any , __UpperCamelCase : Tuple ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] ) -> Any:
A = deque() # sequence deque of finished process
while len(A_ ) != 0:
A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(A_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A = 0
# set the process's turnaround time because it is finished
A = self.current_time - cp.arrival_time
# set the completion time
A = self.current_time
# add the process to queue that has finished queue
finished.append(A_ )
self.finish_queue.extend(A_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __UpperCamelCase ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : Any ) -> Any:
A = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(A_ ) ):
A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(A_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(A_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A = 0
# set the finish time
A = self.current_time
# update the process' turnaround time because it is finished
A = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(A_ )
self.finish_queue.extend(A_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
for i in range(self.number_of_queues - 1 ):
A = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case :Tuple =Process('P1', 0, 53)
__snake_case :Dict =Process('P2', 0, 17)
__snake_case :Union[str, Any] =Process('P3', 0, 68)
__snake_case :int =Process('P4', 0, 24)
__snake_case :Dict =3
__snake_case :Optional[Any] =[17, 25]
__snake_case :Optional[int] =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
__snake_case :Union[str, Any] =Process('P1', 0, 53)
__snake_case :str =Process('P2', 0, 17)
__snake_case :Union[str, Any] =Process('P3', 0, 68)
__snake_case :Any =Process('P4', 0, 24)
__snake_case :Optional[int] =3
__snake_case :Tuple =[17, 25]
__snake_case :Any =deque([Pa, Pa, Pa, Pa])
__snake_case :Dict =MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case :Any =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
) | 106 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: list[list[int]] ) -> bool:
_UpperCAmelCase : int = len(lowerCAmelCase )
# We need to create solution object to save path.
_UpperCAmelCase : List[Any] = [[0 for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )]
_UpperCAmelCase : Tuple = run_maze(lowerCAmelCase , 0 , 0 , lowerCAmelCase )
if solved:
print("\n".join(str(lowerCAmelCase ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: list[list[int]] , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: list[list[int]] ) -> bool:
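    # Depth-first backtracking: mark (i, j) as part of the path, try the four
    # neighbouring cells, and unmark the cell again if none of them reaches the
    # bottom-right target.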
_UpperCAmelCase : str = len(lowerCAmelCase )
# Final check point.
if i == j == (size - 1):
_UpperCAmelCase : Tuple = 1
return True
_UpperCAmelCase : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
_UpperCAmelCase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_UpperCAmelCase : List[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_UpperCAmelCase : Tuple = 1
# check for directions
if (
run_maze(lowerCAmelCase , i + 1 , lowerCAmelCase , lowerCAmelCase )
or run_maze(lowerCAmelCase , lowerCAmelCase , j + 1 , lowerCAmelCase )
or run_maze(lowerCAmelCase , i - 1 , lowerCAmelCase , lowerCAmelCase )
or run_maze(lowerCAmelCase , lowerCAmelCase , j - 1 , lowerCAmelCase )
):
return True
_UpperCAmelCase : Optional[Any] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=17 , SCREAMING_SNAKE_CASE=23 , SCREAMING_SNAKE_CASE=11 , SCREAMING_SNAKE_CASE=True , ) -> str:
"""simple docstring"""
A : str = parent
A : Optional[Any] = batch_size
A : str = seq_length
A : str = act_dim
A : int = state_dim
A : Tuple = hidden_size
A : List[str] = max_length
A : List[Any] = is_training
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Any = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
A : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
A : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
A : Any = floats_tensor((self.batch_size, self.seq_length, 1) )
A : str = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
A : Any = random_attention_mask((self.batch_size, self.seq_length) )
A : List[str] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = DecisionTransformerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = self.prepare_config_and_inputs()
(
(
A
), (
A
), (
A
), (
A
), (
A
), (
A
), (
A
),
) : Optional[int] = config_and_inputs
A : Optional[int] = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (DecisionTransformerModel,) if is_torch_available() else ()
__magic_name__ = ()
__magic_name__ = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__magic_name__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = DecisionTransformerModelTester(self )
A : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Optional[int] = DecisionTransformerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A, A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : str = model_class(SCREAMING_SNAKE_CASE )
A : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Tuple = [*signature.parameters.keys()]
A : str = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE )] , SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Dict = 2 # number of steps of autoregressive prediction we will perform
A : List[Any] = 10 # defined by the RL environment, may be normalized
A : Tuple = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
A : List[Any] = model.to(SCREAMING_SNAKE_CASE )
A : List[Any] = model.config
torch.manual_seed(0 )
A : str = torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE , dtype=torch.floataa ) # env.reset()
A : List[str] = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=SCREAMING_SNAKE_CASE )
A : int = torch.tensor(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=torch.floataa ).reshape(1 , 1 , 1 )
A : Dict = state
A : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=SCREAMING_SNAKE_CASE , dtype=torch.floataa )
A : Union[str, Any] = torch.zeros(1 , 0 , device=SCREAMING_SNAKE_CASE , dtype=torch.floataa )
A : Tuple = torch.tensor(0 , device=SCREAMING_SNAKE_CASE , dtype=torch.long ).reshape(1 , 1 )
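        # Autoregressive rollout: each step appends placeholder action/reward slots,
        # queries the model, then feeds the predicted action, the next (here random)
        # state and the updated return-to-go back in as context.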
for step in range(SCREAMING_SNAKE_CASE ):
A : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=SCREAMING_SNAKE_CASE )] , dim=1 )
A : List[Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=SCREAMING_SNAKE_CASE )] , dim=1 )
A : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
A, A, A : Optional[Any] = model(
states=SCREAMING_SNAKE_CASE , actions=SCREAMING_SNAKE_CASE , rewards=SCREAMING_SNAKE_CASE , returns_to_go=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
A, A, A, A : Optional[Any] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE , dtype=torch.floataa ),
1.0,
False,
{},
)
A : List[Any] = action_pred[0, -1]
A : List[str] = torch.cat([states, state] , dim=1 )
A : int = returns_to_go[0, -1] - reward
A : int = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
A : str = torch.cat(
[timesteps, torch.ones((1, 1) , device=SCREAMING_SNAKE_CASE , dtype=torch.long ) * (step + 1)] , dim=1 )
| 343 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase : Optional[int] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowercase : Optional[int] = 'zero2'
lowercase : List[str] = 'zero3'
lowercase : List[str] = [ZEROa, ZEROa]
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = parameterized.to_safe_name('''_'''.join(str(snake_case__ ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowercase : Any = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A ( __snake_case ):
@parameterized.expand(SCREAMING_SNAKE_CASE , name_func=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE , name_func=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE , )
@parameterized.expand(SCREAMING_SNAKE_CASE , name_func=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE , name_func=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE , model=SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
pass
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = models[model]
A : Any = self.run_trainer(
stage=SCREAMING_SNAKE_CASE , model_name=SCREAMING_SNAKE_CASE , eval_steps=SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE , )
self.do_checks(SCREAMING_SNAKE_CASE )
return output_dir
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , ) -> Any:
"""simple docstring"""
A : Any = self.get_auto_remove_tmp_dir('''./xxx''' , after=SCREAMING_SNAKE_CASE )
A : Dict = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A : Any = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
A : Union[str, Any] = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
A : List[Any] = self.get_launcher(SCREAMING_SNAKE_CASE )
A : int = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE , env=self.get_env() )
return output_dir
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
A : Union[str, Any] = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 343 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any]):
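        # Write a tiny CLIP BPE vocab/merges pair and a ViT image-processor config into a
        # temp dir so processor save/load round-trips can be tested without any downloads.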
lowerCAmelCase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ : Any = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase_ : int = dict(zip(A_ , range(len(A_))))
lowerCAmelCase_ : Tuple = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowerCAmelCase_ : Optional[Any] = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
lowerCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(A_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A_))
lowerCAmelCase_ : Optional[Any] = {
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname , A_)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(A_ , A_)
def UpperCAmelCase__ ( self : List[Any] , **A_ : str):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_)
def UpperCAmelCase__ ( self : List[str] , **A_ : Any):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_)
def UpperCAmelCase__ ( self : Union[str, Any] , **A_ : Optional[int]):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_)
def UpperCAmelCase__ ( self : Tuple):
shutil.rmtree(self.tmpdirname)
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
lowerCAmelCase_ : List[str] = [Image.fromarray(np.moveaxis(A_ , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Dict = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer()
lowerCAmelCase_ : str = self.get_image_processor()
lowerCAmelCase_ : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
processor_slow.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_)
lowerCAmelCase_ : List[str] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
processor_fast.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , A_)
self.assertIsInstance(processor_fast.tokenizer , A_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , A_)
self.assertIsInstance(processor_fast.image_processor , A_)
def UpperCAmelCase__ ( self : Optional[Any]):
lowerCAmelCase_ : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
lowerCAmelCase_ : Optional[Any] = self.get_image_processor(do_normalize=A_ , padding_value=1.0)
lowerCAmelCase_ : Optional[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , A_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , A_)
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
lowerCAmelCase_ : List[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : List[str] = image_processor(A_ , return_tensors='''np''')
lowerCAmelCase_ : Tuple = processor(images=A_ , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : List[str] = self.get_image_processor()
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
lowerCAmelCase_ : Any = '''lower newer'''
lowerCAmelCase_ : Tuple = processor(text=A_)
lowerCAmelCase_ : int = tokenizer(A_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : List[str] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
lowerCAmelCase_ : int = '''lower newer'''
lowerCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Union[str, Any] = processor(text=A_ , images=A_)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(A_):
processor()
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : str = self.get_tokenizer()
lowerCAmelCase_ : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
lowerCAmelCase_ : Dict = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase_ : int = processor(images=A_ , visual_prompt=A_)
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''conditional_pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(A_):
processor()
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_)
lowerCAmelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : Union[str, Any] = processor.batch_decode(A_)
lowerCAmelCase_ : str = tokenizer.batch_decode(A_)
self.assertListEqual(A_ , A_)
| 171 |
def UpperCamelCase( __UpperCamelCase : list ,__UpperCamelCase : list ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ):
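    # Naive 0/1 knapsack recursion: at each index either skip the item or, if it still
    # fits, take it and reduce the remaining capacity; return the better of the two.
    # e.g. knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13 (items of weight 1 and 4)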
if index == number_of_items:
return 0
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Optional[int] = knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ : Optional[Any] = values[index] + knapsack(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,max_weight - weights[index] ,index + 1 )
return max(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 171 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """▁"""
_lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
_lowerCAmelCase = {
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_lowerCAmelCase = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = ['''input_ids''', '''attention_mask''']
snake_case_ = []
snake_case_ = []
def __init__( self , A__ , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=None , A__=None , A__=None , A__ = None , A__=None , A__=False , **A__ , ):
"""simple docstring"""
UpperCAmelCase_: Dict = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
UpperCAmelCase_: int = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_: Optional[Any] = legacy_behaviour
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , tokenizer_file=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A__ , **A__ , )
UpperCAmelCase_: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
UpperCAmelCase_: Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_: Union[str, Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_: Tuple = 1
UpperCAmelCase_: Optional[int] = len(self.sp_model )
UpperCAmelCase_: List[str] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A__ )
}
UpperCAmelCase_: List[str] = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_: Tuple = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_: str = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_: Optional[int] = src_lang if src_lang is not None else "eng_Latn"
UpperCAmelCase_: Optional[int] = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_: Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_: Any = self.__dict__.copy()
UpperCAmelCase_: Optional[int] = None
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_: List[Any] = {}
UpperCAmelCase_: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self , A__ , A__ = None , A__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
UpperCAmelCase_: List[str] = [1] * len(self.prefix_tokens )
UpperCAmelCase_: Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def snake_case_ ( self , A__ , A__ = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self , A__ , A__ = None ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = [self.sep_token_id]
UpperCAmelCase_: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , A__ , A__ , A__ , A__ , **A__ ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_: Optional[Any] = src_lang
UpperCAmelCase_: str = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ )
UpperCAmelCase_: Union[str, Any] = self.convert_tokens_to_ids(A__ )
UpperCAmelCase_: Optional[int] = tgt_lang_id
return inputs
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , A__ ):
"""simple docstring"""
return self.sp_model.encode(A__ , out_type=A__ )
def snake_case_ ( self , A__ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_: int = self.sp_model.PieceToId(A__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , A__ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = "".join(A__ ).replace(A__ , " " ).strip()
return out_string
def snake_case_ ( self , A__ , A__ = None ):
"""simple docstring"""
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_: str = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , "wb" ) as fi:
UpperCAmelCase_: Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
def snake_case_ ( self , A__ , A__ = "eng_Latn" , A__ = None , A__ = "fra_Latn" , **A__ , ):
"""simple docstring"""
UpperCAmelCase_: List[str] = src_lang
UpperCAmelCase_: Dict = tgt_lang
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def snake_case_ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = self.lang_code_to_id[src_lang]
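        # Legacy behaviour appends the language code after </s>; otherwise the language
        # code prefixes the source text and only </s> is appended.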
if self.legacy_behaviour:
UpperCAmelCase_: Tuple = []
UpperCAmelCase_: Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_: Any = [self.cur_lang_code]
UpperCAmelCase_: Optional[int] = [self.eos_token_id]
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
UpperCAmelCase_: List[str] = []
UpperCAmelCase_: List[str] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_: List[str] = [self.cur_lang_code]
UpperCAmelCase_: str = [self.eos_token_id] | 703 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_lowerCAmelCase = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def lowercase ( _a ,_a ,_a ,_a=None ) -> int:
# Initialise PyTorch model
UpperCAmelCase_: Dict = XLNetConfig.from_json_file(_a )
UpperCAmelCase_: List[Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
UpperCAmelCase_: int = finetuning_task
UpperCAmelCase_: List[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCAmelCase_: Tuple = XLNetForSequenceClassification(_a )
elif "squad" in finetuning_task:
UpperCAmelCase_: Any = finetuning_task
UpperCAmelCase_: Tuple = XLNetForQuestionAnswering(_a )
else:
UpperCAmelCase_: int = XLNetLMHeadModel(_a )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_a ,_a ,_a )
# Save pytorch-model
UpperCAmelCase_: List[Any] = os.path.join(_a ,_a )
UpperCAmelCase_: Union[str, Any] = os.path.join(_a ,_a )
print(f"Save PyTorch model to {os.path.abspath(_a )}" )
torch.save(model.state_dict() ,_a )
print(f"Save configuration file to {os.path.abspath(_a )}" )
with open(_a ,"w" ,encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
_lowerCAmelCase = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 306 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a, scheduler=__a)
@torch.no_grad()
def __call__( self, __a = 1, __a = 50, __a = None, __a = "pil", __a = True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.unet.config.sample_size
_lowerCAmelCase : Optional[Any] = (batch_size, 3, img_size, img_size)
_lowerCAmelCase : Any = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase : Union[str, Any] = randn_tensor(__a, generator=__a, device=self.device) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__a)
for t in self.progress_bar(self.scheduler.timesteps):
# here sigma_t == t_i from the paper
_lowerCAmelCase : Optional[Any] = self.scheduler.schedule[t]
_lowerCAmelCase : int = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase , _lowerCAmelCase : Dict = self.scheduler.add_noise_to_input(__a, __a, generator=__a)
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase : Optional[int] = self.scheduler.step(__a, __a, __a, __a)
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[str] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
_lowerCAmelCase : List[str] = self.scheduler.step_correct(
__a, __a, __a, __a, step_output.prev_sample, step_output["derivative"], )
_lowerCAmelCase : Optional[int] = step_output.prev_sample
_lowerCAmelCase : Tuple = (sample / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : int = sample.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowerCAmelCase : int = self.numpy_to_pil(__a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a)
| 500 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=5 ):
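    # Fill-mask helper: encode a sentence containing exactly one <mask>, softmax the
    # logits at the mask position and return the top-k filled sentences with their scores.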
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
_lowerCAmelCase : str = torch.tensor(tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) ).unsqueeze(0 ) # Batch size 1
_lowerCAmelCase : Union[str, Any] = model(_lowerCamelCase )[0] # The last hidden-state is the first element of the output tuple
_lowerCAmelCase : Optional[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_lowerCAmelCase : List[Any] = logits[0, masked_index, :]
_lowerCAmelCase : int = logits.softmax(dim=0 )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = prob.topk(k=_lowerCamelCase , dim=0 )
_lowerCAmelCase : Dict = " ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowerCamelCase ) )] )
_lowerCAmelCase : List[str] = tokenizer.mask_token
_lowerCAmelCase : Optional[Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
_lowerCAmelCase : Dict = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(_lowerCamelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(_lowerCamelCase ) , _lowerCamelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_lowerCamelCase , _lowerCamelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_snake_case = CamembertTokenizer.from_pretrained("camembert-base")
_snake_case = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
_snake_case = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 500 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 713 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ):
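    # Shortest Remaining Time First (preemptive SJF): advance the clock one unit at a
    # time and always run the arrived process with the smallest remaining burst time;
    # waiting time is finish time minus arrival time minus burst time.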
lowerCamelCase_ = [0] * no_of_processes
lowerCamelCase_ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(UpperCAmelCase_ ):
lowerCamelCase_ = burst_time[i]
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 999999999
lowerCamelCase_ = 0
lowerCamelCase_ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(UpperCAmelCase_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowerCamelCase_ = remaining_time[j]
lowerCamelCase_ = j
lowerCamelCase_ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowerCamelCase_ = remaining_time[short]
if minm == 0:
lowerCamelCase_ = 999999999
if remaining_time[short] == 0:
complete += 1
lowerCamelCase_ = False
# Find finish time of current process
lowerCamelCase_ = increment_time + 1
# Calculate waiting time
lowerCamelCase_ = finish_time - arrival_time[short]
lowerCamelCase_ = finar - burst_time[short]
if waiting_time[short] < 0:
lowerCamelCase_ = 0
# Increment time
increment_time += 1
return waiting_time
def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : list[int] ):
lowerCamelCase_ = [0] * no_of_processes
for i in range(UpperCAmelCase_ ):
lowerCamelCase_ = burst_time[i] + waiting_time[i]
return turn_around_time
def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ):
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for i in range(UpperCAmelCase_ ):
lowerCamelCase_ = total_waiting_time + waiting_time[i]
lowerCamelCase_ = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
a_ : Dict = int(input())
a_ : Any = [0] * no_of_processes
a_ : Optional[int] = [0] * no_of_processes
a_ : Tuple = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
a_ , a_ : str = map(int, input().split())
a_ : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a_ : int = burst_time
a_ : Union[str, Any] = no_of_processes
a_ : Optional[int] = waiting_time
a_ : Any = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a_ : Optional[int] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 445 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _A ( _lowercase , _lowercase , _lowercase=0 ) -> Any:
"""simple docstring"""
if name is None:
__UpperCamelCase = None
else:
__UpperCamelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
__UpperCamelCase = fmt.format(_lowercase )
# Print and recurse (if needed).
if isinstance(_lowercase , _lowercase ):
if msg is not None:
print(_lowercase )
for k in val.keys():
recursive_print(_lowercase , val[k] , spaces + 2 )
elif isinstance(_lowercase , torch.Tensor ):
print(_lowercase , ':' , val.size() )
else:
print(_lowercase , ':' , _lowercase )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
__UpperCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
__UpperCamelCase = param.view(*_lowercase )
__UpperCamelCase = param.transpose(0 , 2 )
__UpperCamelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
__UpperCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
__UpperCamelCase = param.view(*_lowercase )
__UpperCamelCase = param.transpose(0 , 1 ).contiguous()
__UpperCamelCase = param.view(*_lowercase )
return param
def _A ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = {}
# old versions did not store training args
__UpperCamelCase = input_state_dict.get('args' , _lowercase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
__UpperCamelCase = ds_args.padded_vocab_size
__UpperCamelCase = ds_args.max_position_embeddings
__UpperCamelCase = ds_args.hidden_size
__UpperCamelCase = ds_args.num_layers
__UpperCamelCase = ds_args.num_attention_heads
__UpperCamelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
__UpperCamelCase = config.n_head
# The hidden_size per head.
__UpperCamelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
__UpperCamelCase = input_state_dict['checkpoint_version']
else:
__UpperCamelCase = 0.0
# The model.
__UpperCamelCase = input_state_dict['model']
# The language model.
__UpperCamelCase = model['language_model']
# The embeddings.
__UpperCamelCase = lm['embedding']
# The word embeddings.
__UpperCamelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
__UpperCamelCase = word_embeddings[: config.vocab_size, :]
__UpperCamelCase = word_embeddings
# The position embeddings.
__UpperCamelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
__UpperCamelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
__UpperCamelCase = pos_embeddings
# The transformer.
__UpperCamelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
__UpperCamelCase = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
__UpperCamelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
__UpperCamelCase = layer_re.match(_lowercase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
__UpperCamelCase = int(m.group(1 ) )
# The name of the operation.
__UpperCamelCase = m.group(2 )
# Is it a weight or a bias?
__UpperCamelCase = m.group(3 )
# The name of the layer.
__UpperCamelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
__UpperCamelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
__UpperCamelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
__UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _lowercase , _lowercase )
__UpperCamelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
__UpperCamelCase = torch.tensor(-1e4 , dtype=torch.floataa )
__UpperCamelCase = masked_bias
__UpperCamelCase = fix_query_key_value_ordering(_lowercase , _lowercase , 3 , _lowercase , _lowercase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
__UpperCamelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
__UpperCamelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
__UpperCamelCase = fix_query_key_value_ordering(_lowercase , _lowercase , 3 , _lowercase , _lowercase )
# Store. No change of shape.
__UpperCamelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
__UpperCamelCase = megatron_to_transformers[op_name]
__UpperCamelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
__UpperCamelCase = megatron_to_transformers[op_name]
__UpperCamelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
__UpperCamelCase = transformer['final_layernorm.weight']
__UpperCamelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
__UpperCamelCase = word_embeddings
# It should be done!
return output_state_dict
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_lowercase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_lowercase , help='An optional config json file describing the pre-trained model.' , )
__UpperCamelCase = parser.parse_args()
# Extract the basename.
__UpperCamelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
    # the .zip extension is optional; keep handling it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
__UpperCamelCase = torch.load(_lowercase , map_location='cpu' )
else:
__UpperCamelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
__UpperCamelCase = input_state_dict.get('args' , _lowercase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
__UpperCamelCase = 'gelu_fast'
elif ds_args.openai_gelu:
__UpperCamelCase = 'gelu_new'
else:
__UpperCamelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
__UpperCamelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
__UpperCamelCase = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=_lowercase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_lowercase , summary_activation=_lowercase , summary_proj_to_labels=_lowercase , summary_first_dropout=0.1 , scale_attn_weights=_lowercase , use_cache=_lowercase , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
__UpperCamelCase = GPTaConfig.from_json_file(args.config_file )
__UpperCamelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
__UpperCamelCase = convert_megatron_checkpoint(_lowercase , _lowercase , _lowercase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowercase , _lowercase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
__UpperCamelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
__UpperCamelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
__UpperCamelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
__UpperCamelCase = 'gpt2'
__UpperCamelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCamelCase = type(_lowercase ).__name__
__UpperCamelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_lowercase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(_lowercase )
# Store the state_dict to file.
__UpperCamelCase = os.path.join(_lowercase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(_lowercase , _lowercase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
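# Example invocation (paths are illustrative, not part of the original script):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/checkpoint.zip            # or the extracted model_optim_rng.pt file
# The converted config, tokenizer files and pytorch_model.bin are written into the directory that
# contains the input checkpoint.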
| 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
for i in range(len(A_ ) - 1 , 0 , -1 ):
lowerCAmelCase__ : Optional[Any] = False
for j in range(A_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = unsorted[j - 1], unsorted[j]
lowerCAmelCase__ : Dict = True
for j in range(A_ ):
if unsorted[j] > unsorted[j + 1]:
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowerCAmelCase__ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
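# Illustrative example of the intended behaviour of cocktail shaker sort (values chosen arbitrarily):
#   cocktail_shaker_sort([4, 5, 2, 1, 2])  ->  [1, 2, 2, 4, 5]
# The list is rearranged in place by alternating backward and forward passes and is returned once a
# full iteration completes without any swap.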
| 450 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ )
lowercase__ = True
lowercase__ = True
print(f'''Building TensorFlow model from configuration: {config}''' )
lowercase__ = model_class(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ = cached_file(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if compare_with_pt_model:
lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network
lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
lowercase__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowercase__ = pt_model(**pt_model.dummy_inputs )
lowercase__ = pto[0].numpy()
lowercase__ = tfo[0].numpy()
lowercase__ = np.amax(np.abs(np_pt - np_tf ) )
print(f'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(f'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" )
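# Minimal single-checkpoint sketch (keyword names taken from the call further below; values are illustrative):
#   convert_pt_checkpoint_to_tf(model_type="bert", pytorch_checkpoint_path="bert-base-cased",
#                               config_file="bert-base-cased", tf_dump_path="/tmp/bert-tf_model.h5",
#                               compare_with_pt_model=True)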
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ):
if args_model_type is None:
lowercase__ = list(MODEL_CLASSES.keys() )
else:
lowercase__ = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ):
print("=" * 100 )
print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase__ = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
lowercase__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
else:
lowercase__ = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
lowercase__ = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE_ )
os.remove(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
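# Example CLI invocation (script name and values are illustrative):
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path /tmp/tf_dump --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased --compare_with_pt_model
# When --model_type is omitted, every architecture listed in MODEL_CLASSES is converted in turn.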
| 37 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowercase__ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowercase__ = f'''{src_lang}-{tgt_lang}'''
lowercase__ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" )
print(f'''Generating {path}''' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
# make sure we are under the root of the project
lowercase_ = Path(__file__).resolve().parent.parent.parent
lowercase_ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""")
lowercase_ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
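# Running this module rewrites model_cards/facebook/wmt19-{ru-en,en-ru,en-de,de-en}/README.md (three
# directory levels above this file) by filling the template above with each pair's BLEU scores.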
| 37 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] =get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
_lowercase : Any =5
_lowercase : int =10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( snake_case__ , unittest.TestCase ):
_a : Dict = SpeechaTextTokenizer
_a : Optional[Any] = False
_a : List[Any] = True
def __a ( self : Tuple ):
super().setUp()
lowerCamelCase_ : str = sp.SentencePieceProcessor()
spm_model.Load(lowerCamelCase )
lowerCamelCase_ : Any = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCamelCase ) )]
lowerCamelCase_ : Optional[Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
lowerCamelCase_ : Union[str, Any] = Path(self.tmpdirname )
save_json(lowerCamelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCamelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] )
lowerCamelCase_ : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : List[Any] ):
lowerCamelCase_ : List[Any] = '<pad>'
lowerCamelCase_ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def __a ( self : Any ):
lowerCamelCase_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(lowerCamelCase ) , 10_01 )
def __a ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def __a ( self : List[str] ):
lowerCamelCase_ : str = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [2_89, 50, 14, 1_74, 3_86] , )
lowerCamelCase_ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCamelCase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(lowerCamelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCamelCase_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def __a ( self : Union[str, Any] ):
# fmt: off
lowerCamelCase_ : Any = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class UpperCamelCase_ ( unittest.TestCase ):
_a : Optional[Any] = 'valhalla/s2t_mustc_multilinguial_medium'
_a : Optional[Any] = 'C\'est trop cool'
_a : List[Any] = 'Esto es genial'
@classmethod
def __a ( cls : List[Any] ):
lowerCamelCase_ : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __a ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def __a ( self : Optional[int] ):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def __a ( self : Dict ):
self.assertIn(lowerCamelCase , self.tokenizer.all_special_ids )
lowerCamelCase_ : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
lowerCamelCase_ : int = self.tokenizer.decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
lowerCamelCase_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase )
def __a ( self : Tuple ):
lowerCamelCase_ : Any = 'fr'
lowerCamelCase_ : Tuple = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , lowerCamelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __a ( self : List[str] ):
lowerCamelCase_ : Tuple = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCamelCase_ : Any = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
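# These unittest-style cases are normally collected with pytest, e.g. (path is illustrative):
#   pytest tests/models/speech_to_text/test_tokenization_speech_to_text.py
# The second test class pulls the "valhalla/s2t_mustc_multilinguial_medium" checkpoint from the Hub,
# so it needs network access or a pre-populated cache.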
| 364 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase_ ( snake_case__ ):
_a : Union[str, Any] = (DPMSolverSDEScheduler,)
_a : List[Any] = 1_0
def __a ( self : Any , **lowerCamelCase : str ):
lowerCamelCase_ : Any = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**lowerCamelCase )
return config
def __a ( self : Tuple ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def __a ( self : int ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def __a ( self : Any ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def __a ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def __a ( self : Optional[int] ):
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : str = self.get_scheduler_config()
lowerCamelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : Optional[Any] = self.dummy_model()
lowerCamelCase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : Union[str, Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : str = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Tuple = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Tuple = output.prev_sample
lowerCamelCase_ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Tuple ):
lowerCamelCase_ : List[str] = self.scheduler_classes[0]
lowerCamelCase_ : Any = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase_ : Tuple = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : Any = self.dummy_model()
lowerCamelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = output.prev_sample
lowerCamelCase_ : Optional[Any] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : List[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def __a ( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCamelCase_ : Dict = self.dummy_model()
lowerCamelCase_ : Dict = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase_ : Tuple = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : List[str] = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = output.prev_sample
lowerCamelCase_ : List[Any] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : str = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Any ):
lowerCamelCase_ : Dict = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : Union[str, Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCamelCase_ : int = self.dummy_model()
lowerCamelCase_ : str = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
lowerCamelCase_ : Dict = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Any = output.prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
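# Note on the pattern exercised above: every test drives the scheduler the way sampling code would,
# i.e. scale_model_input -> model prediction -> scheduler.step for each timestep, then checks the sum
# and mean of the final sample against per-device reference values; the tolerances differ per backend,
# presumably because the SDE solver's noise sampling and float behaviour vary across CPU, CUDA and MPS.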
| 364 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
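# Illustrative note: with the _LazyModule pattern above, `from ...megatron_bert import MegatronBertModel`
# only loads the torch-backed modeling file on first attribute access, so the package itself can be
# imported even when torch is missing; the TYPE_CHECKING branch keeps static analyzers aware of the
# real symbols.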
| 283 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["input_features", "attention_mask"]
def __init__( self : int , lowerCamelCase_ : List[str]=8_0 , lowerCamelCase_ : Tuple=1_6_0_0_0 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : List[Any]=1_0 , lowerCamelCase_ : List[str]=2_5 , lowerCamelCase_ : List[Any]="hamming_window" , lowerCamelCase_ : Tuple=3_2768.0 , lowerCamelCase_ : int=0.97 , lowerCamelCase_ : Optional[int]=1.0 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
_lowercase : Dict = feature_size
_lowercase : Dict = sampling_rate
_lowercase : Tuple = padding_value
_lowercase : int = hop_length
_lowercase : Any = win_length
_lowercase : Union[str, Any] = frame_signal_scale
_lowercase : Tuple = preemphasis_coeff
_lowercase : Tuple = mel_floor
_lowercase : Tuple = normalize_means
_lowercase : List[Any] = normalize_vars
_lowercase : List[str] = win_function
_lowercase : int = return_attention_mask
_lowercase : Optional[Any] = win_length * sampling_rate // 1_0_0_0
_lowercase : Tuple = hop_length * sampling_rate // 1_0_0_0
_lowercase : str = optimal_fft_length(self.sample_size )
_lowercase : Dict = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : np.array ):
"""simple docstring"""
if self.win_function == "hamming_window":
_lowercase : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
else:
_lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowercase : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=lowerCamelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowerCamelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowerCamelCase_ , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple ):
"""simple docstring"""
if self.normalize_means:
_lowercase : Optional[int] = x[:input_length].mean(axis=0 )
_lowercase : int = np.subtract(lowerCamelCase_ , lowerCamelCase_ )
if self.normalize_vars:
_lowercase : int = x[:input_length].std(axis=0 )
_lowercase : Optional[Any] = np.divide(lowerCamelCase_ , lowerCamelCase_ )
if input_length < x.shape[0]:
_lowercase : Dict = padding_value
# make sure array is in float32
_lowercase : Tuple = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[np.ndarray] , lowerCamelCase_ : Optional[np.ndarray] = None ):
"""simple docstring"""
_lowercase : Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_ , lowerCamelCase_ , self.padding_value ) for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )]
def __call__( self : Dict , lowerCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : Optional[int] = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : str = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : Tuple = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : str = [raw_speech]
# extract fbank features
_lowercase : Optional[Any] = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowercase : Optional[int] = BatchFeature({'input_features': features} )
_lowercase : Tuple = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
# make sure list is in array format
_lowercase : Dict = padded_inputs.get('input_features' )
if isinstance(input_features[0] , lowerCamelCase_ ):
_lowercase : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features]
_lowercase : List[Any] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_lowercase : Union[str, Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowercase : int = (
np.array(lowerCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowercase : List[Any] = self.normalize(
padded_inputs['input_features'] , attention_mask=lowerCamelCase_ )
if return_tensors is not None:
_lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
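# Minimal usage sketch (the extractor class name is obfuscated above, so "FeatureExtractor" is a
# hypothetical stand-in; argument names follow __init__ and __call__):
#   extractor = FeatureExtractor(feature_size=80, sampling_rate=16000)
#   batch = extractor(raw_waveform, sampling_rate=16000, padding=True, return_tensors="np")
#   mfsc, mask = batch["input_features"], batch["attention_mask"]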
| 283 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Optional[Any] = '▁'
__A : Any = {'vocab_file': 'sentencepiece.bpe.model'}
__A : int = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
__A : str = {
'xlm-roberta-base': 5_1_2,
'xlm-roberta-large': 5_1_2,
'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2,
'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2,
'xlm-roberta-large-finetuned-conll03-english': 5_1_2,
'xlm-roberta-large-finetuned-conll03-german': 5_1_2,
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Any="</s>" , __lowerCamelCase : int="<s>" , __lowerCamelCase : List[Any]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ):
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _snake_case ( self : List[Any] , index ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : str ):
        out_string = "".join(__lowerCamelCase ).replace("▁" , " " ).strip()  # "▁" is the SentencePiece underline
return out_string
    def _snake_case ( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,) | 16 |
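
# A minimal, standalone sketch (not part of the tokenizer above) of the fairseq/SentencePiece
# id alignment described in the comments: the four special tokens are pinned to ids 0-3 and
# every other SentencePiece id is shifted by the fairseq offset of 1.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def piece_to_model_id(piece, spm_piece_to_id):
    # spm_piece_to_id is a placeholder standing in for sp_model.PieceToId
    if piece in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[piece]
    spm_id = spm_piece_to_id(piece)
    # SentencePiece returns 0 for unknown pieces, so map that to <unk> instead of shifting it
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

# e.g. "," has SentencePiece id 3 in the alignment table above, so its model id becomes 3 + 1 = 4.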
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _SCREAMING_SNAKE_CASE ( TrainingArguments ):
'''simple docstring'''
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCamelCase__ = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = v.to_dict()
return d | 16 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    '''simple docstring'''
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures ):
    '''simple docstring'''
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures


def log_measures(measures , description ):
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__SCREAMING_SNAKE_CASE )]:.2f}MiB""" )
a : List[str] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 701 |
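
# A short usage sketch for the measurement helpers above: wrap the code to profile between
# start_measure() and end_measure(), then print a report with log_measures(). The workload
# below is a placeholder, not part of the original benchmark utilities.
start_measures = start_measure()
_ = sum(i * i for i in range(100_000))  # placeholder workload to measure
measures = end_measure(start_measures)
log_measures(measures, "toy workload")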
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
a : Tuple = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class OnnxRuntimeModel:
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a : Optional[int] = model
a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ )
a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ )
def __call__( self , **lowerCAmelCase__ ) -> Dict:
a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
    def load_model(path , provider=None , sess_options=None ):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 31 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 151 |
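
# A minimal sketch of how the tiny checkpoint built above is typically consumed in fast tests,
# assuming it has been uploaded to the Hub under "stas/tiny-wmt19-en-ru" as the comments state:
tiny_tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
tiny = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
inputs = tiny_tokenizer(["Making tiny model"], return_tensors="pt")
generated = tiny.generate(**inputs, max_new_tokens=5)  # finishes in milliseconds with the 4-dim config
print(tiny_tokenizer.batch_decode(generated, skip_special_tokens=True))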
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase_ = 16
lowerCamelCase_ = 32
def get_dataloaders(accelerator, batch_size=16 ):
SCREAMING_SNAKE_CASE__ =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ =load_dataset("""glue""", """mrpc""" )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ =tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=__UpperCamelCase, max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ =datasets.map(
__UpperCamelCase, batched=__UpperCamelCase, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ =tokenized_datasets.rename_column("""label""", """labels""" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ =8
else:
SCREAMING_SNAKE_CASE__ =None
return tokenizer.pad(
__UpperCamelCase, padding="""longest""", max_length=__UpperCamelCase, pad_to_multiple_of=__UpperCamelCase, return_tensors="""pt""", )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ =DataLoader(
tokenized_datasets["""train"""], shuffle=__UpperCamelCase, collate_fn=__UpperCamelCase, batch_size=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =DataLoader(
tokenized_datasets["""validation"""], shuffle=__UpperCamelCase, collate_fn=__UpperCamelCase, batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase_ = mocked_dataloaders # noqa: F811
def training_function(config, args ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", __UpperCamelCase ) == "1":
SCREAMING_SNAKE_CASE__ =2
# New Code #
SCREAMING_SNAKE_CASE__ =int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ =Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ =config["""lr"""]
SCREAMING_SNAKE_CASE__ =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ =int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ =int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ =evaluate.load("""glue""", """mrpc""" )
set_seed(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =get_dataloaders(__UpperCamelCase, __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ =AdamW(params=model.parameters(), lr=__UpperCamelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ =get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase, num_warmup_steps=100, num_training_steps=(len(__UpperCamelCase ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =accelerator.prepare(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ =model(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCamelCase, references=__UpperCamelCase, )
SCREAMING_SNAKE_CASE__ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", __UpperCamelCase )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args )
if __name__ == "__main__":
main()
| 151 | 1 |
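
# The core pattern from the script above, distilled into a self-contained sketch: Accelerate's
# `accumulate` context manager only performs the real gradient synchronisation and optimizer
# step once `gradient_accumulation_steps` batches have contributed their gradients. The toy
# model and data below are placeholders, not part of the original example.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
toy_model = torch.nn.Linear(8, 1)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
toy_loader = DataLoader(TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=8)
toy_model, toy_optimizer, toy_loader = accelerator.prepare(toy_model, toy_optimizer, toy_loader)
for x, y in toy_loader:
    with accelerator.accumulate(toy_model):  # gradients only sync/step on every 4th batch
        loss = torch.nn.functional.mse_loss(toy_model(x), y)
        accelerator.backward(loss)
        toy_optimizer.step()
        toy_optimizer.zero_grad()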
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None ) -> None:
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''''''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict(), pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
a : Optional[Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 593 |
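
# A minimal sketch of invoking the converter defined above directly from Python; the paths are
# placeholders, and the same values map one-to-one onto the CLI flags declared in the argparse
# block.
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/xlnet_model.ckpt",
    xlnet_config_file="/path/to/xlnet_config.json",
    pytorch_dump_folder_path="/path/to/output_dir",
    finetuning_task="sts-b",  # any key of GLUE_TASKS_NUM_LABELS, a "squad" task, or None
)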
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( Trainer ):
def __init__( self : List[str] , *lowercase_ : Optional[int] , lowercase_ : Dict=None , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , **lowercase_ : Dict ):
super().__init__(*lowercase_ , **lowercase_ )
snake_case_ = eval_examples
snake_case_ = post_process_function
snake_case_ = quant_trainer_args
snake_case_ = 128 # default number of calibration samples
def A_ ( self : int , lowercase_ : Tuple=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
snake_case_ = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case_ = self._remove_unused_columns(lowercase_ , description='''Calibration''' )
return DataLoader(
lowercase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowercase_ , )
def A_ ( self : Dict , lowercase_ : str=None ):
snake_case_ = self.train_dataset if calib_dataset is None else calib_dataset
snake_case_ = self.get_calib_dataloader(lowercase_ )
snake_case_ = self.model
quant_trainer.configure_model(lowercase_ , self.quant_trainer_args , calib=lowercase_ )
model.eval()
quant_trainer.enable_calibration(lowercase_ )
logger.info('''***** Running calibration *****''' )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(lowercase_ ):
# Prediction step
snake_case_ ,snake_case_ ,snake_case_ = self.prediction_step(lowercase_ , lowercase_ , prediction_loss_only=lowercase_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowercase_ , self.quant_trainer_args )
snake_case_ = model
def A_ ( self : Optional[int] , lowercase_ : Any=None , lowercase_ : Any=None , lowercase_ : Optional[int]=None , lowercase_ : str = "eval" ):
snake_case_ = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case_ = self.get_eval_dataloader(lowercase_ )
snake_case_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ = self.compute_metrics
snake_case_ = None
snake_case_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ = eval_loop(
lowercase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , )
finally:
snake_case_ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case_ = self.post_process_function(lowercase_ , lowercase_ , output.predictions )
snake_case_ = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
snake_case_ = metrics.pop(lowercase_ )
self.log(lowercase_ )
else:
snake_case_ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def A_ ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict=None , lowercase_ : str = "test" ):
snake_case_ = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ = self.compute_metrics
snake_case_ = None
snake_case_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case_ = eval_loop(
lowercase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , )
finally:
snake_case_ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case_ = self.post_process_function(lowercase_ , lowercase_ , output.predictions , '''predict''' )
snake_case_ = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
snake_case_ = metrics.pop(lowercase_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
def A_ ( self : Any , lowercase_ : List[Any]="./" ):
snake_case_ = self.eval_dataset
snake_case_ = self.get_eval_dataloader(lowercase_ )
snake_case_ = next(iter(lowercase_ ) )
# saving device - to make it consistent
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case_ = tuple(v.to(lowercase_ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case_ = True
snake_case_ = self.model.to(lowercase_ )
model.eval()
model.float()
snake_case_ = model.module if hasattr(lowercase_ , '''module''' ) else model
quant_trainer.configure_model(lowercase_ , self.quant_trainer_args )
snake_case_ = os.path.join(lowercase_ , '''model.onnx''' )
logger.info(F"exporting model to {output_model_file}" )
snake_case_ = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
lowercase_ , lowercase_ , lowercase_ , export_params=lowercase_ , opset_version=13 , do_constant_folding=lowercase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=lowercase_ , )
logger.info('''onnx export finished''' )
| 593 | 1 |
'''simple docstring'''
def check_cycle(graph: dict ) -> bool:
    """simple docstring"""
    # Keep track of all visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )


def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    """simple docstring"""
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 310 |
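
# A small usage sketch for the cycle check above, with graphs given as adjacency dicts:
acyclic_graph = {0: [1, 2], 1: [2], 2: []}
cyclic_graph = {0: [1], 1: [2], 2: [0]}
print(check_cycle(acyclic_graph))  # False - depth-first search never meets a vertex on the stack
print(check_cycle(cyclic_graph))   # True  - the edge 2 -> 0 closes a cycle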
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000 ) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 310 | 1 |
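
# Quick sanity checks for the solution above: 144 is the 12th Fibonacci term and the first one
# with three digits, so solution(3) is expected to return 12; solution(1000) answers the
# original 1000-digit question.
print(solution(3))     # 12
print(solution(1000))  # index of the first Fibonacci term with 1000 digits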
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 597 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline
UpperCAmelCase_ =["image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase_ =[
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase_ =[
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase_ =False
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
return 32
@property
def _UpperCamelCase ( self ) -> Tuple:
return 32
@property
def _UpperCamelCase ( self ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ) -> List[Any]:
return 100
@property
def _UpperCamelCase ( self ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**_A )
return model
@property
def _UpperCamelCase ( self ) -> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE_ = DDIMScheduler(**_A )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCamelCase ( self , _A , _A=0 ) -> Dict:
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((256, 256) )
if str(_A ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**_A )
SCREAMING_SNAKE_CASE_ = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(_A ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE_ = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
SCREAMING_SNAKE_CASE_ = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = pipeline(
image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
| 597 | 1 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def snake_case ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
return args.f
class UpperCamelCase_ (TestCasePlus ):
def _SCREAMING_SNAKE_CASE ( self : str ) -> None:
UpperCAmelCase_ : List[str] = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] ) -> Tuple:
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCAmelCase_ , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_ : Tuple = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCAmelCase_ )
UpperCAmelCase_ : str = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCAmelCase_ )
| 95 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Union[str, Any]=224 , lowerCAmelCase_ : List[Any]=30 , lowerCAmelCase_ : Any=400 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase_ : str=[0.5, 0.5, 0.5] , ) -> Dict:
UpperCAmelCase_ : int = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = num_channels
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Union[str, Any] = min_resolution
UpperCAmelCase_ : List[str] = max_resolution
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : Optional[int] = size
UpperCAmelCase_ : List[str] = do_normalize
UpperCAmelCase_ : List[Any] = image_mean
UpperCAmelCase_ : Dict = image_std
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ (ImageProcessingSavingTestMixin , unittest.TestCase ):
__magic_name__ = ViTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
# Initialize image_processor
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : str = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
UpperCAmelCase_ : str = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : Dict = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
# Initialize image_processor
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ : Optional[int] = image_processor(lowerCAmelCase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 95 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class __lowerCamelCase ( PreTrainedModel ):
"""simple docstring"""
lowerCAmelCase__ = CLIPConfig
lowerCAmelCase__ = ["CLIPEncoderLayer"]
def __init__( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
lowercase_ = CLIPVisionModelWithProjection(config.vision_config )
lowercase_ = nn.Linear(config.vision_config.projection_dim , 1 )
lowercase_ = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=0.5 , UpperCAmelCase=0.5 ) -> int:
'''simple docstring'''
lowercase_ = self.vision_model(UpperCAmelCase )[0]
lowercase_ = self.p_head(UpperCAmelCase )
lowercase_ = nsfw_detected.flatten()
lowercase_ = nsfw_detected > p_threshold
lowercase_ = nsfw_detected.tolist()
if any(UpperCAmelCase ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(UpperCAmelCase ):
if nsfw_detected_:
lowercase_ = np.zeros(images[idx].shape )
lowercase_ = self.w_head(UpperCAmelCase )
lowercase_ = watermark_detected.flatten()
lowercase_ = watermark_detected > w_threshold
lowercase_ = watermark_detected.tolist()
if any(UpperCAmelCase ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(UpperCAmelCase ):
if watermark_detected_:
lowercase_ = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 601 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    '''simple docstring'''
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)


def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random() )  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module ):
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowercase_ = nn.Parameter(torch.randn(1 ) )
lowercase_ = nn.Parameter(torch.randn(1 ) )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return x * self.a + self.b
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(total_limit=1 , project_dir=UpperCAmelCase , automatic_checkpoint_naming=UpperCAmelCase )
# Train baseline
lowercase_ = Accelerator(project_config=UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ , lowercase_ = dummy_dataloaders()
# Train baseline
lowercase_ = Accelerator()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save initial
lowercase_ = os.path.join(UpperCAmelCase , "initial" )
accelerator.save_state(UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
lowercase_ = train(3 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = Accelerator()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
accelerator.load_state(UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
lowercase_ = train(2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save everything
lowercase_ = os.path.join(UpperCAmelCase , "checkpoint" )
accelerator.save_state(UpperCAmelCase )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCAmelCase )
test_rands += train(1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=UpperCAmelCase )
# Train baseline
lowercase_ = Accelerator(project_dir=UpperCAmelCase , project_config=UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save initial
accelerator.save_state()
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
lowercase_ = train(3 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
# Train partially
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCAmelCase )
lowercase_ = Accelerator(project_dir=UpperCAmelCase , project_config=UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
accelerator.load_state(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_0" ) )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
lowercase_ = train(2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
((lowercase_) , (lowercase_)) = model.a.item(), model.b.item()
lowercase_ = optimizer.state_dict()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = torch.tensor([1, 2, 3] )
lowercase_ = torch.tensor([2, 3, 4] )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(net.parameters() )
lowercase_ = Accelerator()
with self.assertRaises(UpperCAmelCase ) as ve:
accelerator.register_for_checkpointing(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase_ = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def A__ ( self ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
lowercase_ = torch.optim.lr_scheduler.StepLR(UpperCAmelCase , step_size=1 , gamma=0.99 )
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=UpperCAmelCase )
# Train baseline
lowercase_ = Accelerator(project_dir=UpperCAmelCase , project_config=UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save initial
accelerator.save_state()
lowercase_ = scheduler.state_dict()
train(3 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(UpperCAmelCase , scheduler.state_dict() )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
lowercase_ = DummyModel()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=UpperCAmelCase , total_limit=2 )
# Train baseline
lowercase_ = Accelerator(project_dir=UpperCAmelCase , project_config=UpperCAmelCase )
lowercase_ = accelerator.prepare(UpperCAmelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE__ = DummyModel()
SCREAMING_SNAKE_CASE__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
SCREAMING_SNAKE_CASE__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = dummy_dataloaders()
SCREAMING_SNAKE_CASE__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
SCREAMING_SNAKE_CASE__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
SCREAMING_SNAKE_CASE__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE__ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = XLMTokenizer
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : Any = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
__lowerCAmelCase : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE))))
__lowerCAmelCase : Optional[Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
__lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w") as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE))
with open(self.merges_file , "w") as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
__lowerCAmelCase : List[Any] = "lower newer"
__lowerCAmelCase : List[Any] = "lower newer"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[str] = XLMTokenizer(self.vocab_file , self.merges_file)
__lowerCAmelCase : Optional[Any] = "lower"
__lowerCAmelCase : List[Any] = ["low", "er</w>"]
__lowerCAmelCase : int = tokenizer.tokenize(_SCREAMING_SNAKE_CASE)
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = tokens + ["<unk>"]
__lowerCAmelCase : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
__lowerCAmelCase : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand ( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run ( self ):
        raise NotImplementedError()
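# Usage sketch (illustrative only, not part of the original module): a concrete command
# subclasses the base above, registers its own sub-parser, and does its work in run().
# The names below (HelloCommand, "--name") are hypothetical.
#
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         # `parser` is assumed to be the sub-parsers action of the root CLI parser
#         hello_parser = parser.add_parser("hello")
#         hello_parser.add_argument("--name", type=str, default="world")
#         hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))
#
#     def __init__(self, name: str):
#         self._name = name
#
#     def run(self):
#         print(f"Hello, {self._name}!")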
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix: str = "" ) -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
a_ : int = AgentAudio(lowerCAmelCase_ )
a_ : Optional[int] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase_ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
# Ensure that the file contains the same value as the original tensor
a_ , a_ : List[str] = sf.read(lowerCAmelCase_ )
self.assertTrue(torch.allclose(lowerCAmelCase_ , torch.tensor(lowerCAmelCase_ ) , atol=1E-4 ) )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : str = torch.rand(12 , dtype=torch.floataa ) - 0.5
a_ : str = get_new_path(suffix=""".wav""" )
sf.write(lowerCAmelCase_ , lowerCAmelCase_ , 1_60_00 )
a_ : Optional[int] = AgentAudio(lowerCAmelCase_ )
self.assertTrue(torch.allclose(lowerCAmelCase_ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCAmelCase_ )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = torch.randint(0 , 2_56 , (64, 64, 3) )
a_ : List[str] = AgentImage(lowerCAmelCase_ )
a_ : Dict = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase_ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
a_ : Any = Image.open(lowerCAmelCase_ )
a_ : List[Any] = AgentImage(lowerCAmelCase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
a_ : Union[str, Any] = Image.open(lowerCAmelCase_ )
a_ : Union[str, Any] = AgentImage(lowerCAmelCase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = """Hey!"""
a_ : Tuple = AgentText(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , agent_type.to_string() )
self.assertEqual(lowerCAmelCase_ , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self , lowerCAmelCase_ ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
a_ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = """sshleifer/tiny-gpt2"""
a_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : List[Any] = PyTorchBenchmark(lowerCAmelCase_ )
a_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = """sgugger/tiny-distilbert-classification"""
a_ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , only_pretrain_model=lowerCAmelCase_ , )
a_ : Optional[Any] = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = """sshleifer/tiny-gpt2"""
a_ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , torchscript=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : Optional[Any] = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """sshleifer/tiny-gpt2"""
a_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , fpaa=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : List[Any] = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = """sshleifer/tiny-gpt2"""
a_ : Dict = AutoConfig.from_pretrained(lowerCAmelCase_ )
# set architectures equal to `None`
a_ : Any = None
a_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : Optional[Any] = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
a_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = """sshleifer/tiny-gpt2"""
a_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : int = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = """sshleifer/tiny-gpt2"""
a_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCAmelCase_ , multi_process=lowerCAmelCase_ , )
a_ : List[str] = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = """sshleifer/tiny-gpt2"""
a_ : List[str] = AutoConfig.from_pretrained(lowerCAmelCase_ )
a_ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : Tuple = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
a_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = """sshleifer/tinier_bart"""
a_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ )
a_ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : Union[str, Any] = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
a_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = """sshleifer/tiny-gpt2"""
a_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase_ )
a_ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : Union[str, Any] = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
a_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = """sshleifer/tinier_bart"""
a_ : int = AutoConfig.from_pretrained(lowerCAmelCase_ )
a_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase_ , )
a_ : int = PyTorchBenchmark(lowerCAmelCase_ , configs=[config] )
a_ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , save_to_csv=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCAmelCase_ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowerCAmelCase_ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowerCAmelCase_ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowerCAmelCase_ , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowerCAmelCase_ , """env.csv""" ) , multi_process=lowerCAmelCase_ , )
a_ : Tuple = PyTorchBenchmark(lowerCAmelCase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """env.csv""" ) ).exists() )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Dict = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCAmelCase_ ):
self.assertTrue(hasattr(lowerCAmelCase_ , """sequential""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """cumulative""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """current""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase_ , inference=lowerCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCAmelCase_ , """log.txt""" ) , log_print=lowerCAmelCase_ , trace_memory_line_by_line=lowerCAmelCase_ , multi_process=lowerCAmelCase_ , )
a_ : Dict = PyTorchBenchmark(lowerCAmelCase_ )
a_ : Tuple = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCAmelCase_ , """log.txt""" ) ).exists() )
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__snake_case = 0
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__snake_case = tuple[int, int]
class __lowerCamelCase :
def __init__( self: str,A_: int,A_: int,A_: int,A_: int,A_: int,A_: Node | None,):
'''simple docstring'''
__UpperCamelCase = pos_x
__UpperCamelCase = pos_y
__UpperCamelCase = (pos_y, pos_x)
__UpperCamelCase = goal_x
__UpperCamelCase = goal_y
__UpperCamelCase = g_cost
__UpperCamelCase = parent
__UpperCamelCase = self.calculate_heuristic()
__UpperCamelCase = self.g_cost + self.h_cost
def snake_case_ ( self: str ):
'''simple docstring'''
__UpperCamelCase = self.pos_x - self.goal_x
__UpperCamelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(A_ ) + abs(A_ )
else:
return sqrt(dy**2 + dx**2 )
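        # e.g. a node at (0, 0) with goal (6, 6): Manhattan heuristic = 12, Euclidean ≈ 8.49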
def __lt__( self: int,A_: Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __lowerCamelCase :
def __init__( self: Any,A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = Node(start[1],start[0],goal[1],goal[0],0,A_ )
__UpperCamelCase = Node(goal[1],goal[0],goal[1],goal[0],9_9999,A_ )
__UpperCamelCase = [self.start]
__UpperCamelCase = []
__UpperCamelCase = False
def snake_case_ ( self: Any ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(A_ )
self.closed_nodes.append(A_ )
__UpperCamelCase = self.get_successors(A_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(A_ )
else:
self.open_nodes.append(A_ )
return [self.start.pos]
def snake_case_ ( self: int,A_: Node ):
'''simple docstring'''
__UpperCamelCase = []
for action in delta:
__UpperCamelCase = parent.pos_x + action[1]
__UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
A_,A_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,A_,) )
return successors
def snake_case_ ( self: Any,A_: Node | None ):
'''simple docstring'''
__UpperCamelCase = node
__UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCamelCase = current_node.parent
path.reverse()
return path
class __lowerCamelCase :
def __init__( self: List[Any],A_: TPosition,A_: TPosition ):
'''simple docstring'''
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = AStar(A_,A_ )
__UpperCamelCase = False
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCamelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCamelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
A_,A_ )
self.fwd_astar.closed_nodes.append(A_ )
self.bwd_astar.closed_nodes.append(A_ )
__UpperCamelCase = current_bwd_node
__UpperCamelCase = current_fwd_node
__UpperCamelCase = {
self.fwd_astar: self.fwd_astar.get_successors(A_ ),
self.bwd_astar: self.bwd_astar.get_successors(A_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(A_ )
else:
# retrieve the best current path
__UpperCamelCase = astar.open_nodes.pop(
astar.open_nodes.index(A_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(A_ )
else:
astar.open_nodes.append(A_ )
return [self.fwd_astar.start.pos]
def snake_case_ ( self: List[str],A_: Node,A_: Node ):
'''simple docstring'''
__UpperCamelCase = self.fwd_astar.retrace_path(A_ )
__UpperCamelCase = self.bwd_astar.retrace_path(A_ )
bwd_path.pop()
bwd_path.reverse()
__UpperCamelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__snake_case = time.time()
__snake_case = AStar(init, goal)
__snake_case = a_star.search()
__snake_case = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__snake_case = time.time()
__snake_case = BidirectionalAStar(init, goal)
__snake_case = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@slow
    def lowerCAmelCase_ ( self ):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo" )
        image = dataset["train"][0]["image"].convert("RGB" )
        inputs = image_processor(image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
from __future__ import annotations
def a_ (n : int )-> list[int]:
    # Trial division: strip the smallest prime factor from n until none remain.
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
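# Example: a_(360) == [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5.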
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label ( fname ):
"""simple docstring"""
    stem = fname.split(os.path.sep )[-1]
    return re.search(r'^(.*)_\d+\.jpg$' , stem ).groups()[0]
class PetsDataset (Dataset ):
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__( self ):
        return len(self.file_names )
    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function ( config , args ):
"""simple docstring"""
if args.with_tracking:
__UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['lr']
__UpperCamelCase = int(config['num_epochs'] )
__UpperCamelCase = int(config['seed'] )
__UpperCamelCase = int(config['batch_size'] )
__UpperCamelCase = config['image_size']
if not isinstance(_lowercase , (list, tuple) ):
__UpperCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
__UpperCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__UpperCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
__UpperCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__UpperCamelCase = os.path.split(_lowercase )[-1].split('.' )[0]
accelerator.init_trackers(_lowercase , _lowercase )
# Grab all the image filenames
__UpperCamelCase = [os.path.join(args.data_dir , _lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
__UpperCamelCase = [extract_label(_lowercase ) for fname in file_names]
__UpperCamelCase = list(set(_lowercase ) )
id_to_label.sort()
__UpperCamelCase = {lbl: i for i, lbl in enumerate(_lowercase )}
# Set the seed before splitting the data.
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# Split our filenames between train and validation
__UpperCamelCase = np.random.permutation(len(_lowercase ) )
__UpperCamelCase = int(0.8 * len(_lowercase ) )
__UpperCamelCase = random_perm[:cut]
__UpperCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__UpperCamelCase = Compose([RandomResizedCrop(_lowercase , scale=(0.5, 1.0) ), ToTensor()] )
__UpperCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowercase , label_to_id=_lowercase )
# For evaluation, we use a deterministic Resize
__UpperCamelCase = Compose([Resize(_lowercase ), ToTensor()] )
__UpperCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowercase , label_to_id=_lowercase )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
__UpperCamelCase = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = create_model('resnet50d' , pretrained=_lowercase , num_classes=len(_lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__UpperCamelCase = False
for param in model.get_classifier().parameters():
__UpperCamelCase = True
# We normalize the batches of images to be a bit faster.
__UpperCamelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
__UpperCamelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__UpperCamelCase = OneCycleLR(optimizer=_lowercase , max_lr=_lowercase , epochs=_lowercase , steps_per_epoch=len(_lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__UpperCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
__UpperCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__UpperCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__UpperCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__UpperCamelCase = os.path.splitext(_lowercase )[0]
if "epoch" in training_difference:
__UpperCamelCase = int(training_difference.replace('epoch_' , '' ) ) + 1
__UpperCamelCase = None
else:
__UpperCamelCase = int(training_difference.replace('step_' , '' ) )
__UpperCamelCase = resume_step // len(_lowercase )
resume_step -= starting_epoch * len(_lowercase )
# Now we train the model
for epoch in range(_lowercase , _lowercase ):
model.train()
if args.with_tracking:
__UpperCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__UpperCamelCase = accelerator.skip_first_batches(_lowercase , _lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__UpperCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCamelCase = (batch['image'] - mean) / std
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = torch.nn.functional.cross_entropy(_lowercase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__UpperCamelCase = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
model.eval()
__UpperCamelCase = 0
__UpperCamelCase = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCamelCase = (batch['image'] - mean) / std
with torch.no_grad():
__UpperCamelCase = model(_lowercase )
__UpperCamelCase = outputs.argmax(dim=-1 )
__UpperCamelCase, __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['label']) )
__UpperCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__UpperCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_lowercase ),
'epoch': epoch,
} , step=_lowercase , )
if checkpointing_steps == "epoch":
__UpperCamelCase = f'''epoch_{epoch}'''
if args.output_dir is not None:
__UpperCamelCase = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
if args.with_tracking:
accelerator.end_training()
def main ():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument('--data_dir' , required=True , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
        '--checkpointing_steps' , type=str , default=None , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=str , default='logs' , help='Location on where to store experiment tracking logs and relevant project information' , )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config , args )
if __name__ == "__main__":
main()
'''simple docstring'''
A_ = "Input must be a string of 8 numbers plus letter"
A_ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = f'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
lowerCamelCase_ = spanish_id.replace('-' ,'' ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
lowerCamelCase_ = int(spanish_id_clean[0:8] )
lowerCamelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
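# Worked example: for "12345678Z" the numeric part is 12345678, 12345678 % 23 == 14,
# and LOOKUP_LETTERS[14] == "Z", so the ID is reported as valid.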
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = "▁"
__lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def UpperCAmelCase__ ( self : int ):
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = "<s>"
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(__UpperCamelCase ) , 1_004 )
def UpperCAmelCase__ ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = "I was born in 92000, and this is falsé."
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
_UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCAmelCase__ ( self : Any ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = "Hello World!"
_UpperCAmelCase = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
_UpperCAmelCase = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_UpperCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors="pt" , return_token_type_ids=__UpperCamelCase )
_UpperCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCamelCase )
_UpperCAmelCase = BigBirdConfig(attention_type="original_full" )
_UpperCAmelCase = BigBirdModel(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
_UpperCAmelCase = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
# fmt: off
_UpperCAmelCase = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
def proth ( number : int ) -> int:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # 3 and 5 seed the list; each block appends `increment` new terms and the
        # increment doubles after every block.
        proth_list_length = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , proth_list_length ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid ( _outputs ):
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax ( _outputs ):
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
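# Quick sanity check: sigmoid(0.0) == 0.5, and softmax(np.array([[0.0, 0.0]])) gives [[0.5, 0.5]].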
class ClassificationFunction ( ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline ( Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self , **_A ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __snake_case ( self , _A=None , _A=None , _A="" , **_A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = tokenizer_kwargs
_UpperCAmelCase : Tuple = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
_UpperCAmelCase : Optional[int] = self.model.config.return_all_scores
if isinstance(_A , _A ) or top_k is None:
_UpperCAmelCase : List[str] = top_k
_UpperCAmelCase : List[str] = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , _A , )
if return_all_scores:
_UpperCAmelCase : List[str] = None
else:
_UpperCAmelCase : Union[str, Any] = 1
if isinstance(_A , _A ):
_UpperCAmelCase : Union[str, Any] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_UpperCAmelCase : Union[str, Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *_A , **_A ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = super().__call__(*_A , **_A )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_UpperCAmelCase : int = """top_k""" not in kwargs
if isinstance(args[0] , _A ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __snake_case ( self , _A , **_A ) -> Dict[str, GenericTensor]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.framework
if isinstance(_A , _A ):
return self.tokenizer(**_A , return_tensors=_A , **_A )
elif isinstance(_A , _A ) and len(_A ) == 1 and isinstance(inputs[0] , _A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_A , **_A )
elif isinstance(_A , _A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(_A , return_tensors=_A , **_A )
def __snake_case ( self , _A ) -> str:
'''simple docstring'''
return self.model(**_A )
def __snake_case ( self , _A , _A=None , _A=1 , _A=True ) -> Dict:
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_UpperCAmelCase : Any = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_UpperCAmelCase : int = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
_UpperCAmelCase : int = self.model.config.function_to_apply
else:
_UpperCAmelCase : Any = ClassificationFunction.NONE
_UpperCAmelCase : List[Any] = model_outputs["""logits"""][0]
_UpperCAmelCase : Union[str, Any] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_UpperCAmelCase : str = sigmoid(_A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_UpperCAmelCase : str = softmax(_A )
elif function_to_apply == ClassificationFunction.NONE:
_UpperCAmelCase : Union[str, Any] = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_UpperCAmelCase : List[str] = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(_A )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=_A )
if top_k is not None:
_UpperCAmelCase : Tuple = dict_scores[:top_k]
return dict_scores
| 238 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : float ):
'''simple docstring'''
return 0.0
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = 5_12
SCREAMING_SNAKE_CASE : Optional[int] = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE : Tuple = [filter_type.process(lowerCamelCase_ ) for item in inputs]
SCREAMING_SNAKE_CASE : Any = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE : Optional[Any] = np.abs(np.fft.fft(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = 20 * np.logaa(lowerCamelCase_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
SCREAMING_SNAKE_CASE : Any = get_bounds(lowerCamelCase_ , lowerCamelCase_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(lowerCamelCase_ )
plt.show()
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 5_12
SCREAMING_SNAKE_CASE : Optional[Any] = [1] + [0] * (size - 1)
SCREAMING_SNAKE_CASE : str = [filter_type.process(lowerCamelCase_ ) for item in inputs]
SCREAMING_SNAKE_CASE : List[Any] = [0] * (samplerate - size) # zero-padding
outputs += filler
SCREAMING_SNAKE_CASE : Optional[Any] = np.angle(np.fft.fft(lowerCamelCase_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(lowerCamelCase_ , -2 * pi ) )
plt.show()
| 79 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase = random.Random()
def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ):
"""simple docstring"""
if rng is None:
SCREAMING_SNAKE_CASE : Optional[Any] = global_rng
SCREAMING_SNAKE_CASE : Optional[int] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = min_seq_length
SCREAMING_SNAKE_CASE : Any = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : int = spectrogram_length
SCREAMING_SNAKE_CASE : List[Any] = feature_size
SCREAMING_SNAKE_CASE : Any = num_audio_channels
SCREAMING_SNAKE_CASE : Tuple = hop_length
SCREAMING_SNAKE_CASE : str = chunk_length
SCREAMING_SNAKE_CASE : Dict = sampling_rate
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ):
'''simple docstring'''
def _flatten(lowerCamelCase_ : Dict ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_92, 1_28) )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
| 79 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase__ ( _a ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(__UpperCAmelCase )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self._create_example_records()
lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(__UpperCAmelCase ):
self.assertDictEqual(__UpperCAmelCase , example_records[i] )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self._create_example_records()
lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase )
lowerCAmelCase__ = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def UpperCAmelCase ( self )-> Any: # checks what happens with missing columns
'''simple docstring'''
lowerCAmelCase__ = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def UpperCAmelCase ( self )-> Optional[int]: # checks if the type can be inferred from the second record
'''simple docstring'''
lowerCAmelCase__ = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
lowerCAmelCase__ = Dataset.from_list(__UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = Dataset.from_list([] )
self.assertEqual(len(__UpperCAmelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 339 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return getitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
return setitem, k, v
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return delitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str:
"""simple docstring"""
try:
return fun(_UpperCamelCase , *_UpperCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Any = HashMap(initial_block_size=4 )
snake_case_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ : str = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
assert my_res == py_res
assert str(_UpperCamelCase ) == str(_UpperCamelCase )
assert set(_UpperCamelCase ) == set(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
def is_public(_UpperCamelCase ) -> bool:
        return not _UpperCamelCase.startswith('''_''' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 60 | 0 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:Union[str, Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase__ ( self , _a , _a , _a ):
"""simple docstring"""
a__ = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
a__ = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 )
a__ = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def lowercase__ ( self , _a , _a ):
"""simple docstring"""
for example in examples:
a__ = video_classifier(_a )
self.assertEqual(
_a , [
{'score': ANY(_a ), 'label': ANY(_a )},
{'score': ANY(_a ), 'label': ANY(_a )},
] , )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
a__ = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
a__ = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
a__ = pipeline(
'video-classification' , model=_a , feature_extractor=_a , frame_sampling_rate=4 )
a__ = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
a__ = video_classifier(_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
a__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def lowercase__ ( self ):
"""simple docstring"""
pass
| 126 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A : str = 16
__A : Union[str, Any] = 32
def lowerCAmelCase_ ( a : int ):
    return int(a / 2**20 )
class _UpperCamelCase :
'''simple docstring'''
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
a__ = torch.cuda.memory_allocated()
return self
def __exit__( self , *_a ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
a__ = torch.cuda.memory_allocated()
a__ = torch.cuda.max_memory_allocated()
a__ = bamb(self.end - self.begin )
a__ = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCAmelCase_ ( a : Accelerator , a : int = 16 , a : str = "bert-base-cased" , a : int = 320 , a : int = 160 , ):
a__ = AutoTokenizer.from_pretrained(a )
a__ = load_dataset(
'glue' , 'mrpc' , split={'train': f'''train[:{n_train}]''', 'validation': f'''validation[:{n_val}]'''} )
def tokenize_function(a : Any ):
# max_length=None => use the model max length (it's actually the default)
a__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a__ = datasets.map(
a , batched=a , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(a , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
a__ = DataLoader(
tokenized_datasets['train'] , shuffle=a , collate_fn=a , batch_size=a )
a__ = DataLoader(
tokenized_datasets['validation'] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
def lowerCAmelCase_ ( a : Optional[int] , a : Optional[Any] ):
# Initialize accelerator
a__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ = config['lr']
a__ = int(config['num_epochs'] )
a__ = int(config['seed'] )
a__ = int(config['batch_size'] )
a__ = args.model_name_or_path
set_seed(a )
a__ , a__ = get_dataloaders(a , a , a , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ = AutoModelForSequenceClassification.from_pretrained(a , return_dict=a )
# Instantiate optimizer
a__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ = optimizer_cls(params=model.parameters() , lr=a )
if accelerator.state.deepspeed_plugin is not None:
a__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
a__ = 1
a__ = (len(a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=0 , num_training_steps=a , )
else:
a__ = DummyScheduler(a , total_num_steps=a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
a , a , a , a , a )
# We need to keep track of how many total steps we have iterated over
a__ = 0
# We also need to keep track of the stating epoch so files are named properly
a__ = 0
# Now we train the model
a__ = {}
for epoch in range(a , a ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(a ):
a__ = model(**a )
a__ = outputs.loss
a__ = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
a__ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(a , a )
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=a , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=a , )
parser.add_argument(
'--output_dir' , type=a , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=a , default=a , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=a , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=a , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=a , default=1 , help='Number of train epochs.' , )
a__ = parser.parse_args()
a__ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(a , a )
if __name__ == "__main__":
main()
| 126 | 1 |
"""simple docstring"""
from math import factorial
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
a : int = real
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : int = [1] * rank
else:
a : str = rank
def __repr__( self ) -> Dict:
return (
f"""{self.real}+"""
f"""{'+'.join(str(lowerCAmelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def __a ( self ) -> Optional[Any]:
a : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCAmelCase__ )
def __add__( self , lowerCAmelCase__ ) -> List[str]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return Dual(self.real + other , self.duals )
a : Tuple = self.duals.copy()
a : Optional[Any] = other.duals.copy()
if len(lowerCAmelCase__ ) > len(lowerCAmelCase__ ):
o_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
elif len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
s_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
a : Optional[Any] = []
for i in range(len(lowerCAmelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCAmelCase__ )
lowerCamelCase : int =__add__
def __sub__( self , lowerCAmelCase__ ) -> Optional[Any]:
return self + other * -1
def __mul__( self , lowerCAmelCase__ ) -> Any:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : int = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCAmelCase__ )
a : Tuple = [0] * (len(self.duals ) + len(other.duals ) + 1)
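        # Dual * Dual: convolve the dual coefficients (offset by one, since each entry carries an implicit epsilon power) and add the real-part cross terms below.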
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCAmelCase__ )
lowerCamelCase : List[str] =__mul__
def __truediv__( self , lowerCAmelCase__ ) -> Dict:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCAmelCase__ )
raise ValueError
def __floordiv__( self , lowerCAmelCase__ ) -> List[Any]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCAmelCase__ )
raise ValueError
def __pow__( self , lowerCAmelCase__ ) -> Tuple:
if n < 0 or isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
a : Dict = self
for _ in range(n - 1 ):
x *= self
return x
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Optional[int] , _lowercase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if not callable(_lowercase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(_lowercase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(_lowercase , _lowercase ):
raise ValueError("differentiate() requires an int as input for order" )
a : Dict = Dual(_lowercase , 1 )
a : Any = func(_lowercase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->List[Any]:
'''simple docstring'''
        return _lowercase**2 * _lowercase**4
print(differentiate(f, 9, 2))
| 633 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class __UpperCamelCase :
def __init__( self ) -> Any:
a : str = psutil.Process()
a : str = False
def __a ( self ) -> str:
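        # Poll the process RSS in a tight loop; exit once stop() clears the monitoring flag.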
a : List[Any] = -1
while True:
a : str = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def __a ( self ) -> List[str]:
a : Dict = True
a : Optional[Any] = threading.Thread(target=self.peak_monitor )
a : List[Any] = True
self.thread.start()
def __a ( self ) -> List[str]:
a : Optional[int] = False
self.thread.join()
return self.cpu_memory_peak
a : Tuple = PeakCPUMemory()
def _SCREAMING_SNAKE_CASE ( ) ->List[Any]:
'''simple docstring'''
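    # Snapshot wall time, CPU RSS and per-GPU allocated memory before entering the measured region.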
a : List[str] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
a : Tuple = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
a : Any = torch.cuda.memory_allocated(_lowercase )
torch.cuda.reset_peak_memory_stats()
return measures
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->Union[str, Any]:
'''simple docstring'''
a : int = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
a : List[Any] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
a : Union[str, Any] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
a : Optional[Any] = (torch.cuda.memory_allocated(_lowercase ) - start_measures[str(_lowercase )]) / 2**20
a : Optional[Any] = (torch.cuda.max_memory_allocated(_lowercase ) - start_measures[str(_lowercase )]) / 2**20
return measures
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_lowercase )]:.2f}MiB""" )
a : str = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 633 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__SCREAMING_SNAKE_CASE : Dict =logging.getLogger(__name__)
@dataclass
class A_ ( __a ):
_A :Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
_A :bool = field(default=__a , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
_A :bool = field(
default=__a , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
_A :bool = field(default=__a , metadata={'''help''': '''whether to use adafactor'''} )
_A :Optional[float] = field(
default=__a , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
_A :Optional[float] = field(
default=__a , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
_A :Optional[float] = field(default=__a , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
_A :Optional[float] = field(
default=__a , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
_A :Optional[str] = field(
default='''linear''' , metadata={'''help''': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 72 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__SCREAMING_SNAKE_CASE : List[Any] ='''.'''
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] =os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
__SCREAMING_SNAKE_CASE : Dict =[]
__SCREAMING_SNAKE_CASE : Dict =[]
with open(doctest_file_path) as fp:
for line in fp:
__SCREAMING_SNAKE_CASE : Optional[Any] =line.strip()
__SCREAMING_SNAKE_CASE : Tuple =os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] ='''\n'''.join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 72 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
A__ = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _lowerCAmelCase ( __lowerCAmelCase ) -> Tuple:
"""simple docstring"""
snake_case__ : List[str] = torch.load(__lowerCAmelCase , map_location='''cpu''' )
return sd
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=rename_keys_prefix ) -> List[str]:
"""simple docstring"""
snake_case__ : List[Any] = OrderedDict()
snake_case__ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
snake_case__ : Dict = key
for name_pair in rename_keys_prefix:
snake_case__ : List[Any] = new_key.replace(name_pair[0] , name_pair[1] )
snake_case__ : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
snake_case__ : List[Any] = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
snake_case__ : str = '''pretraining'''
if "vcr" in checkpoint_path:
snake_case__ : str = {'''visual_embedding_dim''': 512}
elif "vqa_advanced" in checkpoint_path:
snake_case__ : Dict = {'''visual_embedding_dim''': 2048}
elif "vqa" in checkpoint_path:
snake_case__ : Any = {'''visual_embedding_dim''': 2048}
elif "nlvr" in checkpoint_path:
snake_case__ : List[Any] = {'''visual_embedding_dim''': 1024}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
snake_case__ : int = {'''visual_embedding_dim''': 512}
snake_case__ : Any = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
snake_case__ : Optional[int] = {'''visual_embedding_dim''': 2048}
snake_case__ : Union[str, Any] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
snake_case__ : int = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
snake_case__ : List[Any] = '''vqa'''
elif "nlvr" in checkpoint_path:
snake_case__ : Optional[int] = {
'''visual_embedding_dim''': 1024,
'''num_labels''': 2,
}
snake_case__ : Any = '''nlvr'''
snake_case__ : str = VisualBertConfig(**__lowerCAmelCase )
# Load State Dict
snake_case__ : str = load_state_dict(__lowerCAmelCase )
snake_case__ : Optional[Any] = get_new_dict(__lowerCAmelCase , __lowerCAmelCase )
if model_type == "pretraining":
snake_case__ : Union[str, Any] = VisualBertForPreTraining(__lowerCAmelCase )
elif model_type == "vqa":
snake_case__ : Optional[int] = VisualBertForQuestionAnswering(__lowerCAmelCase )
elif model_type == "nlvr":
snake_case__ : List[str] = VisualBertForVisualReasoning(__lowerCAmelCase )
elif model_type == "multichoice":
snake_case__ : Optional[int] = VisualBertForMultipleChoice(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
# Save Checkpoints
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
A__ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 252 |
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase = " " ) -> list:
"""simple docstring"""
snake_case__ : str = []
snake_case__ : int = 0
for index, char in enumerate(__lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
snake_case__ : Dict = index + 1
elif index + 1 == len(__lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 252 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
UpperCamelCase__: Tuple = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = """bert"""
def __init__( self : Tuple , __snake_case : Union[str, Any]=30522 , __snake_case : int=768 , __snake_case : Dict=12 , __snake_case : Optional[Any]=12 , __snake_case : List[Any]=3072 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : Any=0.1 , __snake_case : int=512 , __snake_case : Any=2 , __snake_case : Optional[int]=0.02 , __snake_case : Tuple=1E-12 , __snake_case : List[Any]=0 , __snake_case : Union[str, Any]="absolute" , __snake_case : str=True , __snake_case : List[str]=None , **__snake_case : Optional[Any] , ) -> int:
super().__init__(pad_token_id=__snake_case , **__snake_case )
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : List[Any] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Tuple = hidden_act
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : Dict = type_vocab_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : List[str] = layer_norm_eps
UpperCAmelCase : int = position_embedding_type
UpperCAmelCase : Dict = use_cache
UpperCAmelCase : Any = classifier_dropout
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
@property
def A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 528 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__: Union[str, Any] = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: List[str] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase__: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 528 | 1 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
snake_case : Dict = list(lowerCAmelCase__ )
snake_case : List[str] = list(lowerCAmelCase__ )
snake_case : Any = 0
for i in range(len(lowerCAmelCase__ ) ):
if lista[i] != lista[i]:
count += 1
snake_case : int = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
snake_case : str = []
while True:
snake_case : Dict = ['$'] * len(lowerCAmelCase__ )
snake_case : str = []
for i in range(len(lowerCAmelCase__ ) ):
for j in range(i + 1 ,len(lowerCAmelCase__ ) ):
snake_case : Optional[Any] = compare_string(binary[i] ,binary[j] )
if k is False:
snake_case : str = '*'
snake_case : List[Any] = '*'
temp.append("""X""" )
for i in range(len(lowerCAmelCase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase__ ) == 0:
return pi
snake_case : Optional[Any] = list(set(lowerCAmelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[int]:
snake_case : Optional[Any] = []
for minterm in minterms:
snake_case : int = ''
for _ in range(lowerCAmelCase__ ):
snake_case : Optional[int] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase__ )
return temp
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Any]:
snake_case : Optional[int] = list(lowerCAmelCase__ )
snake_case : List[Any] = list(lowerCAmelCase__ )
snake_case : Optional[Any] = 0
for i in range(len(lowerCAmelCase__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
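    # Cover essential prime implicants first (minterms covered by a single implicant), then greedily add the implicant covering the most remaining minterms.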
snake_case : Optional[Any] = []
snake_case : str = [0] * len(lowerCAmelCase__ )
for i in range(len(chart[0] ) ):
snake_case : Union[str, Any] = 0
snake_case : Dict = -1
for j in range(len(lowerCAmelCase__ ) ):
if chart[j][i] == 1:
count += 1
snake_case : int = j
if count == 1:
snake_case : Any = 1
for i in range(len(lowerCAmelCase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase__ ) ):
snake_case : int = 0
temp.append(prime_implicants[i] )
while True:
snake_case : int = 0
snake_case : Any = -1
snake_case : str = 0
for i in range(len(lowerCAmelCase__ ) ):
snake_case : Optional[int] = chart[i].count(1 )
if count_n > max_n:
snake_case : str = count_n
snake_case : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase__ ) ):
snake_case : List[Any] = 0
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[Any]:
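    # Coverage chart: chart[i][j] == 1 when implicant i and minterm j differ in exactly as many positions as the implicant has '_' wildcards.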
snake_case : Optional[int] = [[0 for x in range(len(lowerCAmelCase__ ) )] for x in range(len(lowerCAmelCase__ ) )]
for i in range(len(lowerCAmelCase__ ) ):
snake_case : int = prime_implicants[i].count("""_""" )
for j in range(len(lowerCAmelCase__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,lowerCAmelCase__ ):
snake_case : Union[str, Any] = 1
return chart
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[Any] = int(input("""Enter the no. of variables\n""" ) )
snake_case : List[Any] = [
        float(x )
for x in input(
"""Enter the decimal representation of Minterms \'Spaces Separated\'\n""" ).split()
]
snake_case : List[str] = decimal_to_binary(lowerCAmelCase__ ,lowerCAmelCase__ )
snake_case : Optional[int] = check(lowerCAmelCase__ )
print("""Prime Implicants are:""" )
print(lowerCAmelCase__ )
snake_case : Optional[int] = prime_implicant_chart(lowerCAmelCase__ ,lowerCAmelCase__ )
snake_case : Dict = selection(lowerCAmelCase__ ,lowerCAmelCase__ )
print("""Essential Prime Implicants are:""" )
print(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 587 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase_ ( snake_case__ ):
_a : Union[str, Any] = (DPMSolverSDEScheduler,)
_a : List[Any] = 1_0
def __a ( self : Any , **lowerCamelCase : str ):
lowerCamelCase_ : Any = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**lowerCamelCase )
return config
def __a ( self : Tuple ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def __a ( self : int ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def __a ( self : Any ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def __a ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def __a ( self : Optional[int] ):
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : str = self.get_scheduler_config()
lowerCamelCase_ : Optional[Any] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : Optional[Any] = self.dummy_model()
lowerCamelCase_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : Union[str, Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : str = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Tuple = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Tuple = output.prev_sample
lowerCamelCase_ : List[str] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Tuple ):
lowerCamelCase_ : List[str] = self.scheduler_classes[0]
lowerCamelCase_ : Any = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCamelCase_ : Tuple = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : Any = self.dummy_model()
lowerCamelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : List[Any] = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : Dict = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Union[str, Any] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = output.prev_sample
lowerCamelCase_ : Optional[Any] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : List[Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def __a ( self : Optional[Any] ):
lowerCamelCase_ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : List[str] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCamelCase_ : Dict = self.dummy_model()
lowerCamelCase_ : Dict = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase_ : Tuple = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : List[str] = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[int] = output.prev_sample
lowerCamelCase_ : List[Any] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : str = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __a ( self : Any ):
lowerCamelCase_ : Dict = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : Union[str, Any] = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
lowerCamelCase_ : int = self.dummy_model()
lowerCamelCase_ : str = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
lowerCamelCase_ : Dict = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = model(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Any = output.prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(lowerCamelCase ) )
lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 364 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowerCAmelCase ( a__ ):
"""simple docstring"""
__magic_name__ :Optional[Any] = "pegasus"
__magic_name__ :List[str] = ["past_key_values"]
__magic_name__ :Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __UpperCAmelCase=5_0_2_6_5 , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase=1_6 , __UpperCAmelCase=1_2 , __UpperCAmelCase=4_0_9_6 , __UpperCAmelCase=1_6 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="gelu" , __UpperCAmelCase=1_0_2_4 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = vocab_size
lowerCAmelCase__ :Union[str, Any] = max_position_embeddings
lowerCAmelCase__ :int = d_model
lowerCAmelCase__ :Dict = encoder_ffn_dim
lowerCAmelCase__ :Any = encoder_layers
lowerCAmelCase__ :List[Any] = encoder_attention_heads
lowerCAmelCase__ :Union[str, Any] = decoder_ffn_dim
lowerCAmelCase__ :Optional[Any] = decoder_layers
lowerCAmelCase__ :List[str] = decoder_attention_heads
lowerCAmelCase__ :Tuple = dropout
lowerCAmelCase__ :str = attention_dropout
lowerCAmelCase__ :str = activation_dropout
lowerCAmelCase__ :Any = activation_function
lowerCAmelCase__ :List[Any] = init_std
lowerCAmelCase__ :List[str] = encoder_layerdrop
lowerCAmelCase__ :List[Any] = decoder_layerdrop
lowerCAmelCase__ :str = use_cache
lowerCAmelCase__ :Dict = encoder_layers
lowerCAmelCase__ :List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
@property
def snake_case ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def snake_case ( self ):
'''simple docstring'''
return self.d_model
| 710 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ["""image_processor""", """tokenizer"""]
__magic_name__ :str = """BlipImageProcessor"""
__magic_name__ :Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = False
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[str] = self.image_processor
def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
lowerCAmelCase__ :int = self.tokenizer
lowerCAmelCase__ :str = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase__ :Dict = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ :str = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
else:
lowerCAmelCase__ :Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCAmelCase )
return encoding_image_processor
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.tokenizer.model_input_names
lowerCAmelCase__ :Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 560 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __a ( _snake_case, _snake_case ):
@register_to_config
def __init__( self : Optional[Any] ,lowerCamelCase : int = 768 ,):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.ones(1 ,lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[Union[str, torch.device]] = None ,lowerCamelCase : Optional[torch.dtype] = None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Parameter(self.mean.to(lowerCamelCase ).to(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = nn.Parameter(self.std.to(lowerCamelCase ).to(lowerCamelCase ) )
return self
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (embeds - self.mean) * 1.0 / self.std
return embeds
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (embeds * self.std) + self.mean
return embeds
| 109 | def one_pence() -> int:
    return 1
def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_00) + fifty_pence(x)
def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_00) + one_pound(x)
def solution(x: int = 2_00) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
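    # Illustrative sanity check (assumed small input): with the coins {1, 2, 5}, there are
    # exactly 4 ways to make 5 pence (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), so the recursion above
    # gives solution(5) == 4; solution(200) counts every way of making two pounds.
    assert solution(5) == 4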
| 166 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowercase__ ='2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowercase__ =concatenate_datasets
lowercase__ =DownloadConfig
lowercase__ =DownloadManager
lowercase__ =DownloadMode
lowercase__ =DownloadConfig
lowercase__ =DownloadMode
lowercase__ =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 511 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
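    # Worked example (added for illustration): sum_of_series(1, 1, 10) evaluates the
    # arithmetic-series formula as (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0,
    # which matches 1 + 2 + ... + 10.
    assert sum_of_series(1 , 1 , 10 ) == 55.0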
| 511 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (offset chosen so a 3x3 kernel is roughly centred on each pixel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
return output
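# Illustrative behaviour of dilation above (assumes the 3x3 cross-shaped kernel used below):
# a single foreground pixel grows into a cross, e.g.
# dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
# yields [[0, 1, 0], [1, 1, 1], [0, 1, 0]].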
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''') | 90 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
return None
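# Note on the backtracking above (added for clarity): the first empty cell of ``initial_grid``
# is (0, 1); digit 1 passes ``is_safe`` there (no 1 in row 0, column 1, or the top-left box),
# so it is tried first, and the cell is reset to 0 whenever the recursive call fails.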
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( lowercase__ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ConsistencyModelPipeline
_SCREAMING_SNAKE_CASE : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_SCREAMING_SNAKE_CASE : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_SCREAMING_SNAKE_CASE : Tuple = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def lowerCAmelCase ( self : List[Any] ):
__snake_case = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def lowerCAmelCase ( self : Optional[int] ):
__snake_case = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def lowerCAmelCase ( self : Dict , snake_case_ : Dict=False ):
if class_cond:
__snake_case = self.dummy_cond_unet
else:
__snake_case = self.dummy_uncond_unet
# Default to CM multistep sampler
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
}
return components
def lowerCAmelCase ( self : Dict , snake_case_ : Dict , snake_case_ : int=0 ):
if str(snake_case_ ).startswith("mps" ):
__snake_case = torch.manual_seed(snake_case_ )
else:
__snake_case = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__snake_case = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def lowerCAmelCase ( self : Tuple ):
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = ConsistencyModelPipeline(**snake_case_ )
__snake_case = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_dummy_inputs(snake_case_ )
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase ( self : Dict ):
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components(class_cond=snake_case_ )
__snake_case = ConsistencyModelPipeline(**snake_case_ )
__snake_case = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_dummy_inputs(snake_case_ )
__snake_case = 0
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase ( self : Tuple ):
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = ConsistencyModelPipeline(**snake_case_ )
__snake_case = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_dummy_inputs(snake_case_ )
__snake_case = 1
__snake_case = None
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase ( self : int ):
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components(class_cond=snake_case_ )
__snake_case = ConsistencyModelPipeline(**snake_case_ )
__snake_case = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_dummy_inputs(snake_case_ )
__snake_case = 1
__snake_case = None
__snake_case = 0
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 32, 32, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : Union[str, Any]=0 , snake_case_ : Any=False , snake_case_ : Tuple="cpu" , snake_case_ : Any=torch.floataa , snake_case_ : Any=(1, 3, 64, 64) ):
__snake_case = torch.manual_seed(snake_case_ )
__snake_case = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__snake_case = self.get_fixed_latents(seed=snake_case_ , device=snake_case_ , dtype=snake_case_ , shape=snake_case_ )
__snake_case = latents
return inputs
def lowerCAmelCase ( self : Any , snake_case_ : Dict=0 , snake_case_ : int="cpu" , snake_case_ : Optional[int]=torch.floataa , snake_case_ : Optional[int]=(1, 3, 64, 64) ):
if type(snake_case_ ) == str:
__snake_case = torch.device(snake_case_ )
__snake_case = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__snake_case = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
return latents
def lowerCAmelCase ( self : Dict ):
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(torch_device=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_inputs()
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase ( self : List[Any] ):
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(torch_device=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_inputs()
__snake_case = 1
__snake_case = None
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def lowerCAmelCase ( self : Optional[Any] ):
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(torch_device=snake_case_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_inputs(get_fixed_latents=snake_case_ , device=snake_case_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=snake_case_ , enable_math=snake_case_ , enable_mem_efficient=snake_case_ ):
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def lowerCAmelCase ( self : Any ):
__snake_case = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__snake_case = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__snake_case = ConsistencyModelPipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(torch_device=snake_case_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=snake_case_ )
__snake_case = self.get_inputs(get_fixed_latents=snake_case_ , device=snake_case_ )
__snake_case = 1
__snake_case = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=snake_case_ , enable_math=snake_case_ , enable_mem_efficient=snake_case_ ):
__snake_case = pipe(**snake_case_ ).images
assert image.shape == (1, 64, 64, 3)
__snake_case = image[0, -3:, -3:, -1]
__snake_case = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 614 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 614 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , ):
"""simple docstring"""
if config_name_or_path is None:
a__ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
a__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
a__ = question_encoder_name_or_path
a__ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
a__ = RagConfig.from_pretrained(_lowercase )
a__ = AutoConfig.from_pretrained(_lowercase )
a__ = AutoConfig.from_pretrained(_lowercase )
a__ = gen_config
a__ = question_encoder_config
a__ = model_class.from_pretrained_question_encoder_generator(
_lowercase , _lowercase , config=_lowercase )
rag_model.save_pretrained(_lowercase )
# Sanity check.
model_class.from_pretrained(_lowercase )
# Save tokenizers.
a__ = AutoTokenizer.from_pretrained(_lowercase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
a__ = AutoTokenizer.from_pretrained(_lowercase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
UpperCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
UpperCamelCase_ : List[Any] = parser.parse_args()
UpperCamelCase_ : Tuple = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 331 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = BloomTokenizerFast
UpperCamelCase__ = BloomTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = '''tokenizer_file'''
UpperCamelCase__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def lowerCAmelCase_ ( self : Dict ):
super().setUp()
a__ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Dict ,**a__ : List[str] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**a__ )
def lowerCAmelCase_ ( self : Tuple ):
a__ = self.get_rust_tokenizer()
a__ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
a__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
a__ = tokenizer.batch_encode_plus(a__ )["input_ids"]
self.assertListEqual(a__ ,a__ )
a__ = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ ,a__ )
def lowerCAmelCase_ ( self : Tuple ,a__ : List[str]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a__ = self.rust_tokenizer_class.from_pretrained(a__ ,**a__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
a__ = "This is a simple input"
a__ = ["This is a simple input 1", "This is a simple input 2"]
a__ = ("This is a simple input", "This is a pair")
a__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(a__ ,max_length=a__ )
tokenizer_r.encode_plus(a__ ,max_length=a__ )
tokenizer_r.batch_encode_plus(a__ ,max_length=a__ )
tokenizer_r.encode(a__ ,max_length=a__ )
tokenizer_r.batch_encode_plus(a__ ,max_length=a__ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
a__ = None # Hotfixing padding = None
self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="max_length" )
# Simple input
self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="max_length" )
# Simple input
self.assertRaises(
a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="max_length" ,)
# Pair input
self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding="max_length" )
# Pair input
self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding="max_length" )
# Pair input
self.assertRaises(
a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding="max_length" ,)
def lowerCAmelCase_ ( self : Any ):
a__ = self.get_rust_tokenizer()
a__ = load_dataset("xnli" ,"all_languages" ,split="test" ,streaming=a__ )
a__ = next(iter(a__ ) )["premise"] # pick up one data
a__ = list(sample_data.values() )
a__ = list(map(tokenizer.encode ,a__ ) )
a__ = [tokenizer.decode(a__ ,clean_up_tokenization_spaces=a__ ) for x in output_tokens]
self.assertListEqual(a__ ,a__ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraints. The parent class's test would fail because it relies
        # on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 331 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]=sys.maxsize ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] ='''bilinear'''
lowercase : Optional[Any] =max_size
lowercase : List[Any] =short_edge_length
def __call__( self : int , UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase : Any =[]
for img in imgs:
lowercase , lowercase : Optional[Any] =img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase : str =np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase : int =size * 1.0 / min(UpperCAmelCase , UpperCAmelCase )
if h < w:
lowercase , lowercase : Dict =size, scale * w
else:
lowercase , lowercase : str =scale * h, size
if max(UpperCAmelCase , UpperCAmelCase ) > self.max_size:
lowercase : Any =self.max_size * 1.0 / max(UpperCAmelCase , UpperCAmelCase )
lowercase : Tuple =newh * scale
lowercase : Any =neww * scale
lowercase : List[str] =int(neww + 0.5 )
lowercase : List[str] =int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase : Optional[int] =Image.fromarray(UpperCAmelCase )
lowercase : List[str] =pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase : Optional[Any] =np.asarray(UpperCAmelCase )
else:
lowercase : Any =img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowercase : Union[str, Any] =nn.functional.interpolate(
UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=UpperCAmelCase ).squeeze(0 )
img_augs.append(UpperCAmelCase )
return img_augs
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
lowercase : Optional[Any] =ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase : Any =cfg.INPUT.FORMAT
lowercase : List[Any] =cfg.SIZE_DIVISIBILITY
lowercase : List[str] =cfg.PAD_VALUE
lowercase : Dict =cfg.INPUT.MAX_SIZE_TEST
lowercase : Optional[Any] =cfg.MODEL.DEVICE
lowercase : int =torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase : Tuple =torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase : int =lambda UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
def A__ ( self : List[Any] , UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =tuple(max(UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
lowercase : Union[str, Any] =[im.shape[-2:] for im in images]
lowercase : Tuple =[
nn.functional.pad(
UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCAmelCase , UpperCAmelCase )
]
return torch.stack(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
def __call__( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=False ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Any =[images]
if single_image:
assert len(UpperCAmelCase ) == 1
for i in range(len(UpperCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCAmelCase , images.pop(UpperCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(UpperCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase : Union[str, Any] =torch.tensor([im.shape[:2] for im in images] )
lowercase : Dict =self.aug(UpperCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase : List[str] =[self.normalizer(UpperCAmelCase ) for x in images]
# now pad them to do the following operations
lowercase , lowercase : Dict =self.pad(UpperCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase : Union[str, Any] =torch.true_divide(UpperCAmelCase , UpperCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowercase_ ( __A : Tuple , __A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowercase_ ( __A : Dict , __A : Tuple[int, int] ) -> Union[str, Any]:
"""simple docstring"""
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
lowercase , lowercase : Optional[Any] =box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
| 8 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
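    # Illustrative checks: the pattern accepts a bare 10-digit number starting with 7, 8 or 9,
    # optionally prefixed with '+91', '91' or a leading 0, and rejects anything else.
    assert indian_phone_validator('9876543210')
    assert not indian_phone_validator('1234567890')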
| 8 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=100 , __lowercase=13 , __lowercase=30 , __lowercase=2 , __lowercase=3 , __lowercase=True , __lowercase=True , __lowercase=32 , __lowercase=4 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=None , __lowercase=[0, 1, 2, 3] , ):
"""simple docstring"""
__A : Dict = parent
__A : int = 100
__A : int = batch_size
__A : str = image_size
__A : Dict = patch_size
__A : Dict = num_channels
__A : Optional[int] = is_training
__A : List[str] = use_labels
__A : Dict = hidden_size
__A : List[str] = num_hidden_layers
__A : Optional[int] = num_attention_heads
__A : str = intermediate_size
__A : Union[str, Any] = hidden_act
__A : int = hidden_dropout_prob
__A : Optional[Any] = attention_probs_dropout_prob
__A : Dict = type_sequence_label_size
__A : Dict = initializer_range
__A : str = scope
__A : Tuple = out_indices
__A : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__A : List[str] = (image_size // patch_size) ** 2
__A : List[Any] = num_patches + 1
def snake_case__ ( self ):
"""simple docstring"""
__A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Dict = None
__A : Tuple = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case__ ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : Optional[int] = BeitModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : int = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : int = BeitForMaskedImageModeling(config=__lowercase )
model.to(__lowercase )
model.eval()
__A : str = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : Any = self.type_sequence_label_size
__A : Any = BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__A : List[str] = 1
__A : int = BeitForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__A : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A : str = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self , __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
__A : List[str] = self.num_labels
__A : List[str] = BeitForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__A : Optional[Any] = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__A : int = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def snake_case__ ( self ):
"""simple docstring"""
__A : Any = self.prepare_config_and_inputs()
__A ,__A ,__A ,__A : Dict = config_and_inputs
__A : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowercase : Union[str, Any] = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Optional[int] = False
__lowercase : str = False
__lowercase : Dict = False
def snake_case__ ( self ):
"""simple docstring"""
__A : Optional[Any] = BeitModelTester(self )
__A : Union[str, Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def snake_case__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def snake_case__ ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def snake_case__ ( self ):
"""simple docstring"""
pass
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : int = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(__lowercase )
__A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Dict = [*signature.parameters.keys()]
__A : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def snake_case__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]:
continue
__A : Dict = model_class(__lowercase )
model.to(__lowercase )
model.train()
__A : Optional[int] = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__A : List[Any] = model(**__lowercase ).loss
loss.backward()
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__A : Union[str, Any] = False
__A : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__A : Any = model_class(__lowercase )
model.gradient_checkpointing_enable()
model.to(__lowercase )
model.train()
__A : Any = self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
__A : Tuple = model(**__lowercase ).loss
loss.backward()
def snake_case__ ( self ):
"""simple docstring"""
__A ,__A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
__A : Optional[Any] = model_class(config=__lowercase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def snake_case__ ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : int = BeitModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def _lowercase ( ):
__A : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : List[str] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__lowercase )
__A : Optional[Any] = self.default_image_processor
__A : Union[str, Any] = prepare_img()
__A : List[Any] = image_processor(images=__lowercase , return_tensors='pt' ).pixel_values.to(__lowercase )
# prepare bool_masked_pos
__A : Tuple = torch.ones((1, 196) , dtype=torch.bool ).to(__lowercase )
# forward pass
with torch.no_grad():
__A : Tuple = model(pixel_values=__lowercase , bool_masked_pos=__lowercase )
__A : str = outputs.logits
# verify the logits
__A : List[Any] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , __lowercase )
__A : Optional[int] = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowercase , atol=1E-2 ) )
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : Any = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__lowercase )
__A : Union[str, Any] = self.default_image_processor
__A : List[Any] = prepare_img()
__A : Optional[Any] = image_processor(images=__lowercase , return_tensors='pt' ).to(__lowercase )
# forward pass
with torch.no_grad():
__A : Any = model(**__lowercase )
__A : Optional[Any] = outputs.logits
# verify the logits
__A : List[str] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , __lowercase )
__A : Optional[Any] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1E-4 ) )
__A : Dict = 281
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : List[str] = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__lowercase )
__A : Union[str, Any] = self.default_image_processor
__A : Union[str, Any] = prepare_img()
__A : List[str] = image_processor(images=__lowercase , return_tensors='pt' ).to(__lowercase )
# forward pass
with torch.no_grad():
__A : List[str] = model(**__lowercase )
__A : str = outputs.logits
# verify the logits
__A : List[str] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , __lowercase )
__A : Dict = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1E-4 ) )
__A : Union[str, Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , __lowercase )
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : Tuple = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__A : Any = model.to(__lowercase )
__A : List[str] = BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__A : Optional[Any] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__A : int = Image.open(ds[0]['file'] )
__A : Union[str, Any] = image_processor(images=__lowercase , return_tensors='pt' ).to(__lowercase )
# forward pass
with torch.no_grad():
__A : Optional[Any] = model(**__lowercase )
__A : List[str] = outputs.logits
# verify the logits
__A : int = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __lowercase )
__A : Dict = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__A : Union[str, Any] = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__lowercase , )
else:
__A : Optional[int] = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def snake_case__ ( self ):
"""simple docstring"""
__A : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__A : Optional[Any] = model.to(__lowercase )
__A : str = BeitImageProcessor(do_resize=__lowercase , size=640 , do_center_crop=__lowercase )
__A : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__A : int = Image.open(ds[0]['file'] )
__A : Any = image_processor(images=__lowercase , return_tensors='pt' ).to(__lowercase )
# forward pass
with torch.no_grad():
__A : Dict = model(**__lowercase )
__A : Tuple = outputs.logits.detach().cpu()
__A : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(500, 300)] )
__A : Tuple = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __lowercase )
__A : str = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__A : int = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 365 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowercase ( ):
__A : Dict = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=UpperCamelCase__ )
__A : Any = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=UpperCamelCase__ )
env_command_parser(subparsers=UpperCamelCase__ )
launch_command_parser(subparsers=UpperCamelCase__ )
tpu_command_parser(subparsers=UpperCamelCase__ )
test_command_parser(subparsers=UpperCamelCase__ )
# Let's go
__A : Optional[Any] = parser.parse_args()
if not hasattr(UpperCamelCase__, 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 365 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline | 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 228 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __A( a ):
snake_case_ = 42
@flax_register_to_config
class __A( nn.Module , a , a ):
snake_case_ = 3_2
snake_case_ = 4
snake_case_ = 4
snake_case_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
snake_case_ = False
snake_case_ = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
snake_case_ = 2
snake_case_ = 8
snake_case_ = None
snake_case_ = 1_2_8_0
snake_case_ = 0.0
snake_case_ = False
snake_case_ = jnp.floataa
snake_case_ = True
snake_case_ = 0
snake_case_ = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> FrozenDict:
'''simple docstring'''
__a = (1, self.in_channels, self.sample_size, self.sample_size)
__a = jnp.zeros(_snake_case , dtype=jnp.floataa )
__a = jnp.ones((1,) , dtype=jnp.intaa )
__a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__a , __a = jax.random.split(_snake_case )
__a = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_snake_case , _snake_case , _snake_case , _snake_case )["params"]
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.block_out_channels
__a = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__a = self.num_attention_heads or self.attention_head_dim
# input
__a = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__a = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__a = FlaxTimestepEmbedding(_snake_case , dtype=self.dtype )
__a = self.only_cross_attention
if isinstance(_snake_case , _snake_case ):
__a = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_snake_case , _snake_case ):
__a = (num_attention_heads,) * len(self.down_block_types )
# down
__a = []
__a = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__a = output_channel
__a = block_out_channels[i]
__a = i == len(_snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__a = FlaxCrossAttnDownBlockaD(
in_channels=_snake_case , out_channels=_snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a = FlaxDownBlockaD(
in_channels=_snake_case , out_channels=_snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_snake_case )
__a = down_blocks
# mid
__a = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__a = []
__a = list(reversed(_snake_case ) )
__a = list(reversed(_snake_case ) )
__a = list(reversed(_snake_case ) )
__a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__a = output_channel
__a = reversed_block_out_channels[i]
__a = reversed_block_out_channels[min(i + 1 , len(_snake_case ) - 1 )]
__a = i == len(_snake_case ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__a = FlaxCrossAttnUpBlockaD(
in_channels=_snake_case , out_channels=_snake_case , prev_output_channel=_snake_case , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__a = FlaxUpBlockaD(
in_channels=_snake_case , out_channels=_snake_case , prev_output_channel=_snake_case , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_snake_case )
__a = output_channel
__a = up_blocks
# out
__a = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__a = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case = True , _snake_case = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(_snake_case , jnp.ndarray ):
__a = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
__a = timesteps.astype(dtype=jnp.floataa )
__a = jnp.expand_dims(_snake_case , 0 )
__a = self.time_proj(_snake_case )
__a = self.time_embedding(_snake_case )
# 2. pre-process
__a = jnp.transpose(_snake_case , (0, 2, 3, 1) )
__a = self.conv_in(_snake_case )
# 3. down
__a = (sample,)
for down_block in self.down_blocks:
if isinstance(_snake_case , _snake_case ):
__a , __a = down_block(_snake_case , _snake_case , _snake_case , deterministic=not train )
else:
__a , __a = down_block(_snake_case , _snake_case , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__a = ()
for down_block_res_sample, down_block_additional_residual in zip(
_snake_case , _snake_case ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__a = new_down_block_res_samples
# 4. mid
__a = self.mid_block(_snake_case , _snake_case , _snake_case , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__a = down_block_res_samples[-(self.layers_per_block + 1) :]
__a = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_snake_case , _snake_case ):
__a = up_block(
_snake_case , temb=_snake_case , encoder_hidden_states=_snake_case , res_hidden_states_tuple=_snake_case , deterministic=not train , )
else:
__a = up_block(_snake_case , temb=_snake_case , res_hidden_states_tuple=_snake_case , deterministic=not train )
# 6. post-process
__a = self.conv_norm_out(_snake_case )
__a = nn.silu(_snake_case )
__a = self.conv_out(_snake_case )
__a = jnp.transpose(_snake_case , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_snake_case ) | 219 |
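# Editor's sketch — illustrative only. The module above cannot be instantiated as written (its
# local and parameter names are placeholders), so this commented pseudo-usage only shows the
# intended Flax calling pattern for a UNet2DCondition-style module. Shapes follow the defaults
# declared above (in_channels=4, sample_size=32, cross_attention_dim=1280); the sequence length
# of 77 and the class name used below are assumptions.
# import jax
# import jax.numpy as jnp
# unet = FlaxUNet2DConditionModel()  # hypothetical, properly named equivalent of the class above
# params = unet.init_weights(jax.random.PRNGKey(0))
# sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
# timesteps = jnp.array([10], dtype=jnp.int32)
# encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
# out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)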
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger()
@dataclass
class __A:
snake_case_ = 42
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = len(list(m.modules() ) ) == 1 or isinstance(_snake_case , nn.Convad ) or isinstance(_snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_snake_case )
def __call__( self , _snake_case ) -> Any:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_snake_case )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
return list(filter(lambda _snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def __call__( self , _snake_case ) -> Dict:
'''simple docstring'''
__a = Tracker(self.dest )(_snake_case ).parametrized
__a = Tracker(self.src )(_snake_case ).parametrized
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.src_skip , _snake_case ) )
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.dest_skip , _snake_case ) )
if len(_snake_case ) != len(_snake_case ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(_snake_case )} operations while"""
F""" destination module has {len(_snake_case )}.""" )
for dest_m, src_m in zip(_snake_case , _snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = True ) -> str:
print(F"""Converting {name}...""" )
with torch.no_grad():
__a = timm.create_model(a__ , pretrained=a__ ).eval()
__a = ResNetForImageClassification(a__ ).eval()
__a = ModuleTransfer(src=a__ , dest=a__ )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(a__ )
assert torch.allclose(from_model(a__ ) , our_model(a__ ).logits ), "The model logits don't match the original one."
__a = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(a__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a__ , )
# we can use the convnext one
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a__ , )
print(F"""Pushed {checkpoint_name}""" )
def __lowerCAmelCase ( a__ , a__ = None , a__ = True ) -> List[Any]:
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(a__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(a__ , num_labels=a__ , idalabel=a__ , labelaid=a__ )
__a = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(a__ , names_to_config[model_name] , a__ , a__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a__ , a__ , a__ , a__ )
return config, expected_shape
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
A : List[Any] = parser.parse_args()
A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 219 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ :
def __init__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> List[Any]:
if not conversation_id:
_snake_case = uuid.uuida()
if past_user_inputs is None:
_snake_case = []
if generated_responses is None:
_snake_case = []
_snake_case = conversation_id
_snake_case = past_user_inputs
_snake_case = generated_responses
_snake_case = text
def __eq__( self , lowerCAmelCase_ ) -> Union[str, Any]:
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> Union[str, Any]:
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten '''
F'''with: \"{text}\".''' )
_snake_case = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: \"{self.new_user_input}\" new input '''
F'''ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_snake_case = text
def lowerCAmelCase ( self ) -> List[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_snake_case = None
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[Any]:
self.generated_responses.append(lowerCamelCase__ )
def lowerCAmelCase ( self ) -> str:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> int:
_snake_case = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_snake_case = '''user''' if is_user else '''bot'''
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
__lowerCAmelCase , r'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , )
class UpperCamelCase_ ( __lowerCAmelCase ):
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> int:
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.tokenizer.pad_token_id is None:
_snake_case = self.tokenizer.eos_token
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = {}
_snake_case = {}
_snake_case = {}
if min_length_for_response is not None:
_snake_case = min_length_for_response
if minimum_tokens is not None:
_snake_case = minimum_tokens
if "max_length" in generate_kwargs:
_snake_case = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase__ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowerCAmelCase_ , lowerCAmelCase_=0 , **lowerCAmelCase_ ) -> str:
_snake_case = super().__call__(lowerCamelCase__ , num_workers=lowerCamelCase__ , **lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=32 ) -> Dict[str, Any]:
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError('ConversationalPipeline expects a Conversation object as input' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
_snake_case = self.tokenizer._build_conversation_input_ids(lowerCamelCase__ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_snake_case = self._legacy_parse_and_tokenize(lowerCamelCase__ )
if self.framework == "pt":
_snake_case = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_snake_case = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=10 , **lowerCAmelCase_ ) -> List[Any]:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length )
_snake_case = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_snake_case = max_length - minimum_tokens
_snake_case = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
_snake_case = model_inputs['''attention_mask'''][:, -trim:]
_snake_case = model_inputs.pop('conversation' )
_snake_case = max_length
_snake_case = self.model.generate(**lowerCamelCase__ , **lowerCamelCase__ )
if self.model.config.is_encoder_decoder:
_snake_case = 1
else:
_snake_case = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=True ) -> Optional[Any]:
_snake_case = model_outputs['''output_ids''']
_snake_case = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ , )
_snake_case = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(lowerCamelCase__ )
return conversation
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict:
_snake_case = self.tokenizer.eos_token_id
_snake_case = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) )
if len(lowerCamelCase__ ) > self.tokenizer.model_max_length:
_snake_case = input_ids[-self.tokenizer.model_max_length :]
return input_ids
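# Editor's usage sketch — illustrative only; the model checkpoint named below is an assumption,
# not something referenced in this file. The Conversation/pipeline pair above is normally driven
# through transformers.pipeline:
# from transformers import pipeline, Conversation
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("Hi, can you recommend a book?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])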
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 541 | 0 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
lowercase_ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__(self , A , A=False , A=False , A=False , A=None , A=None , A=None , A=None , A = None , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' if you are testing the model, this can safely be ignored''' )
_a = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_a = '''<|endoftext|>''' if eos_token is None else eos_token
_a = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_a = unk_token if pad_token is None else pad_token
_a = eos_token if bos_token is None else bos_token
else:
_a = '''<pad>''' if pad_token is None else pad_token
_a = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
# Used for whitespace normalization in input texts
# fmt: off
_a = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_a = re.compile(
f'''[{''.join(map(A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )
def __getstate__(self ) -> Tuple:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> str:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def a__ (self ) -> int:
"""simple docstring"""
return len(self.sp_model )
def a__ (self , A ) -> str:
"""simple docstring"""
_a = self.non_printing_characters_re.sub('''''' , A )
# Normalize whitespaces
_a = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
_a = unicodedata.normalize('''NFC''' , A )
return text
def a__ (self , A , **A ) -> List[str]:
"""simple docstring"""
_a = self.preprocess_text(A )
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(A )
def a__ (self , A ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(A )
@staticmethod
def a__ (A ) -> str:
"""simple docstring"""
return out_string
def a__ (self , A ) -> str:
"""simple docstring"""
_a = []
_a = ''''''
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_a = True
_a = []
else:
current_sub_tokens.append(A )
_a = False
out_string += self.sp_model.decode(A )
return out_string
def a__ (self ) -> Dict[str, int]:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def a__ (self , A , A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
"""simple docstring"""
if isinstance(A , A ):
_a = self.preprocess_text(A )
_a = self.sp_model.encode(A )
else:
_a = [self.preprocess_text(A ) for t in text]
_a = self.sp_model.encode(A )
if return_tensors is True or return_tensors == "pt":
_a = torch.tensor(A )
return token_ids
def a__ (self , A ) -> str:
"""simple docstring"""
return self.sp_model.decode(A )
def a__ (self , A ) -> List[int]:
"""simple docstring"""
_a = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
_a = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=A )
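# Editor's usage sketch — illustrative only. In the upstream library the class above is the
# GPT-SW3 tokenizer; the checkpoint name is taken from the pretrained map earlier in this
# snippet and the example sentence is arbitrary.
# from transformers import GPTSw3Tokenizer
# tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
# input_ids = tokenizer("Svenska är roligt", return_tensors="pt")["input_ids"]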
| 11 |
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , )
)
def __lowerCamelCase ( ) ->None:
snake_case__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
snake_case__ = math.log(len(UpperCAmelCase_ ) , 2 )
print(f'''Optimal value : {minimax(0 , 0 , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
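# Editor's worked example (illustrative): with the scores above, height = log2(8) = 3 and a
# maximising root (as in the reference implementation this snippet mirrors), the evaluation
# collapses level by level:
#   depth 2 (max): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (min): min(90, 33)=33, min(65, 34423)=65
#   root    (max): max(33, 65)=65
# so the printed optimal value is 65.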
| 368 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Optional[int] = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = ['DeiTFeatureExtractor']
snake_case : Any = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 339 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
snake_case : List[Any] = {'UserAgent': UserAgent().random}
def lowercase__ ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
__lowercase = script.contents[0]
__lowercase = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCamelCase__:
def __init__( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = F'''https://www.instagram.com/{username}/'''
__lowercase = self.get_json()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = requests.get(self.url , headers=__UpperCAmelCase ).text
__lowercase = BeautifulSoup(__UpperCAmelCase , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
"""simple docstring"""
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
"""simple docstring"""
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["username"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["full_name"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["biography"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["business_email"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["external_url"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["is_verified"]
@property
def __magic_name__ ( self ):
"""simple docstring"""
return self.user_data["is_private"]
def lowercase__ ( __UpperCamelCase : str = "github" ):
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
__lowercase = InstagramUser(__UpperCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __UpperCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : int = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 339 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A = None, A = None, A = False, A = False, A = None, A = None, **A, ):
'''simple docstring'''
super().__init__(
features=A, cache_dir=A, keep_in_memory=A, streaming=A, num_proc=A, **A, )
SCREAMING_SNAKE_CASE : Optional[Any] = Generator(
cache_dir=A, features=A, generator=A, gen_kwargs=A, **A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE : Optional[Any] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
self.builder.download_and_prepare(
download_config=A, download_mode=A, verification_mode=A, base_path=A, num_proc=self.num_proc, )
SCREAMING_SNAKE_CASE : List[str] = self.builder.as_dataset(
split='train', verification_mode=A, in_memory=self.keep_in_memory )
return dataset
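# Editor's usage sketch (illustrative): the input stream above is what backs Dataset.from_generator;
# the toy generator below exists only for this example.
from datasets import Dataset

def _editor_example_gen():
    # yield a few dictionary rows, as a user-supplied generator would
    for i in range(3):
        yield {"text": f"example {i}"}

_editor_ds = Dataset.from_generator(_editor_example_gen)
assert _editor_ds.num_rows == 3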
| 28 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''char'''
A : Any = '''bpe'''
A : Dict = '''wp'''
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = ['''image_processor''', '''char_tokenizer''']
A : int = '''ViTImageProcessor'''
A : List[str] = '''MgpstrTokenizer'''
def __init__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', A, )
SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(A, A )
def __call__( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(A ):
SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = final_strs
SCREAMING_SNAKE_CASE : Any = final_scores
SCREAMING_SNAKE_CASE : Dict = char_strs
SCREAMING_SNAKE_CASE : Any = bpe_strs
SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
return out
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
SCREAMING_SNAKE_CASE : Optional[int] = 1
SCREAMING_SNAKE_CASE : str = '[s]'
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE : str = self.bpe_decode
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = '#'
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE : Any = self.wp_decode
SCREAMING_SNAKE_CASE : Tuple = 102
SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
else:
raise ValueError(F"Format {format} is not supported." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
for index in range(A ):
SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(A )
conf_scores.append(A )
return dec_strs, conf_scores
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
return decode_strs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
return decode_strs
| 28 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowerCamelCase = 192
__lowerCamelCase = 768
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = [800, 1333]
__lowerCamelCase = False
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = 330
__lowerCamelCase = 14
__lowerCamelCase = 6
__lowerCamelCase = 1320
elif "yolos_s" in yolos_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 6
elif "yolos_b" in yolos_name:
__lowerCamelCase = [800, 1344]
__lowerCamelCase = 91
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """coco-detection-id2label.json"""
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[-config.hidden_size :, :]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( A__ : Optional[Any] ):
'''simple docstring'''
if "backbone" in name:
__lowerCamelCase = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__lowerCamelCase = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__lowerCamelCase = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__lowerCamelCase = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__lowerCamelCase = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__lowerCamelCase = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def lowerCamelCase__ ( A__ : str , A__ : Tuple ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(A__ )
if "qkv" in key:
__lowerCamelCase = key.split(""".""" )
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[
dim : dim * 2, :
]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : str , A__ : List[Any] , A__ : Tuple , A__ : Tuple = False ):
'''simple docstring'''
__lowerCamelCase = get_yolos_config(A__ )
# load original state_dict
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
__lowerCamelCase = YolosForObjectDetection(A__ )
model.eval()
__lowerCamelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
# Check outputs on an image, prepared by YolosImageProcessor
__lowerCamelCase = 800 if yolos_name != """yolos_ti""" else 512
__lowerCamelCase = YolosImageProcessor(format="""coco_detection""" , size=A__ )
__lowerCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowerCamelCase = model(**A__ )
__lowerCamelCase, __lowerCamelCase = outputs.logits, outputs.pred_boxes
__lowerCamelCase, __lowerCamelCase = None, None
if yolos_name == "yolos_ti":
__lowerCamelCase = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__lowerCamelCase = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__lowerCamelCase = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__lowerCamelCase = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__lowerCamelCase = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__lowerCamelCase = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__lowerCamelCase = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__lowerCamelCase = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__lowerCamelCase = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , A__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , A__ , atol=1E-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
__lowerCamelCase = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
__lowerCamelCase = model_mapping[yolos_name]
image_processor.push_to_hub(A__ , organization="""hustvl""" )
model.push_to_hub(A__ , organization="""hustvl""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
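# Editor's note — illustrative invocation only; the script filename is an assumption and the
# paths are placeholders, but the flags match the argparse definitions above:
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small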
| 704 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
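# Editor's sketch — a self-contained restatement of the back-edge idea above, added for
# illustration (it does not call the helpers in this snippet, whose local names are placeholders).
def _editor_has_cycle(graph: dict) -> bool:
    visited, stack = set(), set()

    def dfs(vertex) -> bool:
        visited.add(vertex)
        stack.add(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in visited and dfs(neighbour):
                return True
            if neighbour in stack:
                return True
        stack.remove(vertex)
        return False

    return any(vertex not in visited and dfs(vertex) for vertex in graph)

assert _editor_has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert _editor_has_cycle({0: [1], 1: [2], 2: []}) is False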
| 80 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase ="\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase ="huggingface-tools/default-prompts"
__lowerCAmelCase ={"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
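# Editor's usage sketch — illustrative only; the agent name is a placeholder and the function is
# defined above under a placeholder name (upstream it is download_prompt). The returned template
# keeps "<<task>>" in place for the caller to substitute:
# template = download_prompt(None, agent_name="my-agent", mode="run")
# prompt = template.replace("<<task>>", "Summarize the following text ...")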
| 333 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = list(_lowerCAmelCase )
UpperCAmelCase = list(_lowerCAmelCase )
UpperCAmelCase = 0
for i in range(len(_lowerCAmelCase ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase = "_"
if count > 1:
return False
else:
return "".join(_lowerCAmelCase )
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = []
while True:
UpperCAmelCase = ["$"] * len(_lowerCAmelCase )
UpperCAmelCase = []
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + 1 , len(_lowerCAmelCase ) ):
UpperCAmelCase = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase = "*"
UpperCAmelCase = "*"
temp.append("X" )
for i in range(len(_lowerCAmelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_lowerCAmelCase ) == 0:
return pi
UpperCAmelCase = list(set(_lowerCAmelCase ) )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = []
for minterm in minterms:
UpperCAmelCase = ""
for _ in range(_lowerCAmelCase ):
UpperCAmelCase = str(minterm % 2 ) + string
minterm //= 2
temp.append(_lowerCAmelCase )
return temp
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = list(_lowerCAmelCase )
UpperCAmelCase = list(_lowerCAmelCase )
UpperCAmelCase = 0
for i in range(len(_lowerCAmelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = [0] * len(_lowerCAmelCase )
for i in range(len(chart[0] ) ):
UpperCAmelCase = 0
UpperCAmelCase = -1
for j in range(len(_lowerCAmelCase ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase = j
if count == 1:
UpperCAmelCase = 1
for i in range(len(_lowerCAmelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_lowerCAmelCase ) ):
UpperCAmelCase = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase = 0
UpperCAmelCase = -1
UpperCAmelCase = 0
for i in range(len(_lowerCAmelCase ) ):
UpperCAmelCase = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase = count_n
UpperCAmelCase = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_lowerCAmelCase ) ):
UpperCAmelCase = 0
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = [[0 for x in range(len(_lowerCAmelCase ) )] for x in range(len(_lowerCAmelCase ) )]
for i in range(len(_lowerCAmelCase ) ):
UpperCAmelCase = prime_implicants[i].count("_" )
for j in range(len(_lowerCAmelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _lowerCAmelCase ):
UpperCAmelCase = 1
return chart
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = int(input("Enter the no. of variables\n" ) )
UpperCAmelCase = [
float(_lowerCAmelCase )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
UpperCAmelCase = decimal_to_binary(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase = check(_lowerCAmelCase )
print("Prime Implicants are:" )
print(_lowerCAmelCase )
UpperCAmelCase = prime_implicant_chart(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase = selection(_lowerCAmelCase , _lowerCAmelCase )
print("Essential Prime Implicants are:" )
print(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
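# Editor's sketch — a self-contained illustration of the merging step implemented by the first
# helper above (added for clarity; it does not call the helpers in this snippet):
def _editor_merge_if_adjacent(a, b):
    # two implicants combine only if they differ in exactly one bit position
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return False
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]

assert _editor_merge_if_adjacent("0110", "0100") == "01_0"
assert _editor_merge_if_adjacent("0110", "1001") is False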
| 333 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__UpperCAmelCase =datasets.logging.get_logger(__name__)
__UpperCAmelCase ="""\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 '95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
__UpperCAmelCase ="""\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite were added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
__UpperCAmelCase ="""
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def __a ( A , A , A=False , A=False , A=True , A=False , A="dummy_doc" ) -> Tuple:
'''simple docstring'''
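    # Collect per-document mention clusters for the key (gold) and system annotations,
    # optionally restricting mentions to NPs / minimum spans and dropping nested mentions or
    # singletons, then record each side's mention-to-cluster assignment for scoring.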
A__ = {doc: key_lines}
A__ = {doc: sys_lines}
A__ = {}
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ = 0
A__ , A__ = reader.get_doc_mentions(A , key_doc_lines[doc] , A )
key_singletons_num += singletons_num
if NP_only or min_span:
A__ = reader.set_annotated_parse_trees(A , key_doc_lines[doc] , A , A )
A__ , A__ = reader.get_doc_mentions(A , sys_doc_lines[doc] , A )
sys_singletons_num += singletons_num
if NP_only or min_span:
A__ = reader.set_annotated_parse_trees(A , key_doc_lines[doc] , A , A )
if remove_nested:
A__ , A__ = reader.remove_nested_coref_mentions(A , A )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
A__ , A__ = reader.remove_nested_coref_mentions(A , A )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
A__ = reader.get_mention_assignments(A , A )
A__ = reader.get_mention_assignments(A , A )
A__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def __a ( A , A , A , A , A , A , A ) -> List[Any]:
'''simple docstring'''
A__ = get_coref_infos(A , A , A , A , A , A )
A__ = {}
A__ = 0
A__ = 0
for name, metric in metrics:
A__ , A__ , A__ = evaluator.evaluate_documents(A , A , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
A__ = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({"conll_score": conll} )
return output_scores
def __a ( A ) -> List[str]:
'''simple docstring'''
A__ = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
A__ = line.split()[5]
if not parse_col == "-":
A__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowercase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False ):
'''simple docstring'''
A__ = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
A__ = util.check_gold_parse_annotation(UpperCamelCase__ )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
A__ = evaluate(
key_lines=UpperCamelCase__ , sys_lines=UpperCamelCase__ , metrics=UpperCamelCase__ , NP_only=UpperCamelCase__ , remove_nested=UpperCamelCase__ , keep_singletons=UpperCamelCase__ , min_span=UpperCamelCase__ , )
return score | 261 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
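        # in_channels=8: InstructPix2Pix conditions the UNet on the encoded input image, so the
        # 4 image latent channels are concatenated with the 4 noisy latent channels.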
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A__ = CLIPTextModel(UpperCamelCase__ )
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
'''simple docstring'''
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("RGB" )
if str(UpperCamelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCamelCase__ )
else:
A__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = "french fries"
A__ = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
A__ = output.images
A__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = [inputs["prompt"]] * 2
A__ = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
A__ = torch.from_numpy(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
A__ = image / 2 + 0.5
A__ = image.permute(0 , 3 , 1 , 2 )
A__ = image.repeat(2 , 1 , 1 , 1 )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = self.get_dummy_inputs(UpperCamelCase__ )
A__ = sd_pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1]
        A__ = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in A__] ) )
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_dummy_components()
A__ = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A__ = VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="pt" ) )[0]
A__ = components["vae"]
A__ = self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A__ = vae.encode(inputs[image_param] ).latent_dist.mode()
A__ = pipe(**UpperCamelCase__ )[0]
A__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase__ , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self , UpperCamelCase__=0 ):
'''simple docstring'''
A__ = torch.manual_seed(UpperCamelCase__ )
A__ = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
A__ = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
A__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ )
A__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ ).images
A__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
A__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase_ ( self ):
'''simple docstring'''
A__ = 0
def callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
A__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A__ = latents[0, -3:, -3:, -1]
A__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A__ = False
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase_ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = self.get_inputs()
A__ = pipe(**UpperCamelCase__ )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase_ ( self ):
'''simple docstring'''
A__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = inputs["image"].resize((5_04, 5_04) )
A__ = "timbrooks/instruct-pix2pix"
A__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ = pipe(**UpperCamelCase__ )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
A__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 | 261 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
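# Maps fairseq wav2vec2 parameter name fragments to their Hugging Face counterparts; the '*'
# placeholder is filled in with the encoder layer index during conversion.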
UpperCAmelCase : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCAmelCase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__UpperCAmelCase : Dict = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__UpperCAmelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_g":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_v":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "bias":
__UpperCAmelCase : Tuple = value
else:
__UpperCAmelCase : Any = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : int = fairseq_model.state_dict()
__UpperCAmelCase : List[Any] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__UpperCAmelCase : Dict = None
for name, value in fairseq_dict.items():
__UpperCAmelCase : Any = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__UpperCAmelCase : List[str] = True
elif name.split(""".""" )[0] == "proj":
__UpperCAmelCase : Any = fairseq_model.proj
__UpperCAmelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCAmelCase : Optional[int] = True
if "*" in mapped_key:
__UpperCAmelCase : Tuple = name.split(_UpperCamelCase )[0].split(""".""" )[-2]
__UpperCAmelCase : Dict = mapped_key.replace("""*""" , _UpperCamelCase )
if "weight_g" in name:
__UpperCAmelCase : Dict = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase : Union[str, Any] = """weight_v"""
elif "bias" in name:
__UpperCAmelCase : int = """bias"""
elif "weight" in name:
__UpperCAmelCase : Optional[Any] = """weight"""
else:
__UpperCAmelCase : int = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : Dict = name.split(""".""" )
__UpperCAmelCase : Tuple = int(items[0] )
__UpperCAmelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__UpperCAmelCase : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__UpperCAmelCase : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
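    # Turn an embedding matrix into an nn.Linear projection that copies the embedding weights.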
__UpperCAmelCase ,__UpperCAmelCase : Dict = emb.weight.shape
__UpperCAmelCase : Union[str, Any] = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
__UpperCAmelCase : int = emb.weight.data
return lin_layer
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
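    # Parse a fairseq dictionary file (one token per line, token first) into a vocab mapping
    # that reserves ids 0-3 for <s>, <pad>, </s> and <unk>.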
    with open(_UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
    words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Any = WavaVecaConfig.from_pretrained(_UpperCamelCase )
__UpperCAmelCase : Dict = SpeechaTextaConfig.from_pretrained(
_UpperCamelCase , vocab_size=_UpperCamelCase , decoder_layers=_UpperCamelCase , do_stable_layer_norm=_UpperCamelCase )
__UpperCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__UpperCAmelCase : List[Any] = model[0].eval()
# set weights for wav2vec2 encoder
__UpperCAmelCase : Dict = WavaVecaModel(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , _UpperCamelCase )
__UpperCAmelCase : List[Any] = SpeechaTextaForCausalLM(_UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCamelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
__UpperCAmelCase : str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__UpperCAmelCase : int = SpeechEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
__UpperCAmelCase : Dict = False
# add projection layer
__UpperCAmelCase : List[Any] = nn.Parameter(projection_layer.weight )
__UpperCAmelCase : Optional[int] = nn.Parameter(projection_layer.bias )
__UpperCAmelCase : Tuple = create_vocab_dict(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : List[Any] = SpeechaTextaTokenizer(os.path.join(_UpperCamelCase , """vocab.json""" ) )
tokenizer.save_pretrained(_UpperCamelCase )
__UpperCAmelCase : int = hf_wavavec.config.to_dict()
__UpperCAmelCase : List[str] = tokenizer.pad_token_id
__UpperCAmelCase : Dict = tokenizer.bos_token_id
__UpperCAmelCase : List[str] = tokenizer.eos_token_id
__UpperCAmelCase : int = """speech_to_text_2"""
__UpperCAmelCase : List[Any] = """wav2vec2"""
__UpperCAmelCase : str = SpeechEncoderDecoderConfig.from_dict(_UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
feature_extractor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
UpperCAmelCase : Optional[int] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 139 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase : Tuple = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : List[Any] = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
__a = BartTokenizer
def __init__( self : int , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[str]=None , UpperCamelCase : Tuple="replace" , UpperCamelCase : Optional[int]="<s>" , UpperCamelCase : str="</s>" , UpperCamelCase : str="</s>" , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Optional[int]="<unk>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Any="<mask>" , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Tuple=True , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase , **UpperCamelCase , )
__UpperCAmelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space:
__UpperCAmelCase : List[str] = getattr(UpperCamelCase , pre_tok_state.pop("""type""" ) )
__UpperCAmelCase : int = add_prefix_space
__UpperCAmelCase : List[Any] = pre_tok_class(**UpperCamelCase )
__UpperCAmelCase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCAmelCase : Union[str, Any] = """post_processor"""
__UpperCAmelCase : Union[str, Any] = getattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
if tokenizer_component_instance:
__UpperCAmelCase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
__UpperCAmelCase : int = tuple(state["""sep"""] )
if "cls" in state:
__UpperCAmelCase : Optional[int] = tuple(state["""cls"""] )
__UpperCAmelCase : int = False
if state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space:
__UpperCAmelCase : Dict = add_prefix_space
__UpperCAmelCase : Optional[int] = True
if state.get("""trim_offsets""" , UpperCamelCase ) != trim_offsets:
__UpperCAmelCase : str = trim_offsets
__UpperCAmelCase : int = True
if changes_to_apply:
__UpperCAmelCase : List[str] = getattr(UpperCamelCase , state.pop("""type""" ) )
__UpperCAmelCase : Tuple = component_class(**UpperCamelCase )
setattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else value
__UpperCAmelCase : Any = value
def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase : int , **UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : str = kwargs.get("""is_split_into_words""" , UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = kwargs.get("""is_split_into_words""" , UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : Dict=None ):
'''simple docstring'''
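        # Single sequence: `<s> A </s>`; pair of sequences: `<s> A </s> </s> B </s>`.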
__UpperCAmelCase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
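        # BART does not use token type ids; a zero vector of the appropriate length is
        # returned purely for API compatibility.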
__UpperCAmelCase : Optional[Any] = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 139 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : List[str] ={'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
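# Each optional backend below (sentencepiece, tokenizers, PyTorch, TensorFlow) is probed with a
# try/except; its symbols are only added to the lazy import structure when the dependency is
# installed.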
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =[
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 714 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__SCREAMING_SNAKE_CASE : str = MaskFormerConfig(backbone_config=lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[Any] = 847
__SCREAMING_SNAKE_CASE : Optional[int] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Dict = 150
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Union[str, Any] = 171
__SCREAMING_SNAKE_CASE : Tuple = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__SCREAMING_SNAKE_CASE : Dict = 133
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[int] = 19
__SCREAMING_SNAKE_CASE : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Tuple = 65
__SCREAMING_SNAKE_CASE : Optional[Any] = '''mapillary-vistas-id2label.json'''
    __SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
    __SCREAMING_SNAKE_CASE : Any = {int(k ): v for k, v in __SCREAMING_SNAKE_CASE.items()}
return config
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : int = dct.pop(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = val
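# Swin checkpoints store a single fused qkv projection per attention block; the helper below
# splits it into the separate query/key/value weights and biases expected by the HF model.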
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
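# `prepare_img` below fetches the standard COCO image of two cats on a couch that these conversion
# scripts use as a quick sanity-check input for the converted model.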
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
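# Illustrative invocation (the script file name and the local checkpoint path below are
# placeholders, not taken from the original repository):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub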
| 260 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCamelCase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
def A__ (self):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ (self , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =config.context_length + max(config.lags_sequence)
__UpperCAmelCase =ids_tensor([self.batch_size, 1] , config.cardinality[0])
__UpperCAmelCase =floats_tensor([self.batch_size, _past_length, config.num_time_features])
__UpperCAmelCase =floats_tensor([self.batch_size, _past_length])
__UpperCAmelCase =floats_tensor([self.batch_size, _past_length]) > 0.5
# decoder inputs
__UpperCAmelCase =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
__UpperCAmelCase =floats_tensor([self.batch_size, config.prediction_length])
__UpperCAmelCase ={
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
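    # Shapes produced above (derived from the tester defaults): past_values and past_observed_mask
    # are (batch_size, context_length + max(lags_sequence)), past_time_features is
    # (batch_size, context_length + max(lags_sequence), num_time_features), future_values is
    # (batch_size, prediction_length) and future_time_features is
    # (batch_size, prediction_length, num_time_features).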
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.get_config()
__UpperCAmelCase =self.prepare_autoformer_inputs_dict(UpperCAmelCase)
return config, inputs_dict
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.prepare_config_and_inputs()
return config, inputs_dict
def A__ (self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =AutoformerModel(config=UpperCAmelCase).to(UpperCAmelCase).eval()
__UpperCAmelCase =model(**UpperCAmelCase)
__UpperCAmelCase =outputs.encoder_last_hidden_state
__UpperCAmelCase =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase =model.get_encoder()
encoder.save_pretrained(UpperCAmelCase)
__UpperCAmelCase =AutoformerEncoder.from_pretrained(UpperCAmelCase).to(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =model.create_network_inputs(**UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
__UpperCAmelCase =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__UpperCAmelCase =encoder(inputs_embeds=UpperCAmelCase)[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
__UpperCAmelCase =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
.unsqueeze(1)
.repeat(1 , config.prediction_length , 1)
)
__UpperCAmelCase =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__UpperCAmelCase =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__UpperCAmelCase =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase =model.get_decoder()
decoder.save_pretrained(UpperCAmelCase)
__UpperCAmelCase =AutoformerDecoder.from_pretrained(UpperCAmelCase).to(UpperCAmelCase)
__UpperCAmelCase =decoder(
trend=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
a_ : Union[str, Any] = False
a_ : Optional[Any] = False
a_ : Optional[int] = False
a_ : Union[str, Any] = False
a_ : List[str] = False
a_ : Optional[int] = False
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =AutoformerModelTester(self)
__UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase)
def A__ (self):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase)
self.assertEqual(info['''missing_keys'''] , [])
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase)
@unittest.skip(reason='''Model has no tokens embeddings''')
def A__ (self):
'''simple docstring'''
pass
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =inspect.signature(getattr(UpperCAmelCase , '''forward'''))
# The main input is the name of the argument after `self`
__UpperCAmelCase =list(model_signature.parameters.keys())[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase =model_class(UpperCAmelCase)
__UpperCAmelCase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase =[*signature.parameters.keys()]
__UpperCAmelCase =[
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''')
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
])
self.assertListEqual(arg_names[: len(UpperCAmelCase)] , UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase =True
__UpperCAmelCase =getattr(self.model_tester , '''seq_length''' , UpperCAmelCase)
__UpperCAmelCase =getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase)
__UpperCAmelCase =getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase)
__UpperCAmelCase =getattr(self.model_tester , '''d_model''' , UpperCAmelCase)
__UpperCAmelCase =getattr(self.model_tester , '''num_attention_heads''' , UpperCAmelCase)
__UpperCAmelCase =d_model // num_attention_heads
for model_class in self.all_model_classes:
__UpperCAmelCase =True
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =model_class(UpperCAmelCase)
model.to(UpperCAmelCase)
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase))
__UpperCAmelCase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase =True
__UpperCAmelCase =model_class(UpperCAmelCase)
model.to(UpperCAmelCase)
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase))
__UpperCAmelCase =outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__UpperCAmelCase =len(UpperCAmelCase)
__UpperCAmelCase =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase)
# decoder attentions
__UpperCAmelCase =outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple))
self.assertEqual(len(UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__UpperCAmelCase =outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple))
self.assertEqual(len(UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__UpperCAmelCase =True
__UpperCAmelCase =True
__UpperCAmelCase =model_class(UpperCAmelCase)
model.to(UpperCAmelCase)
model.eval()
with torch.no_grad():
__UpperCAmelCase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase))
self.assertEqual(out_len + 2 , len(UpperCAmelCase))
__UpperCAmelCase =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ (self):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(UpperCAmelCase)
__UpperCAmelCase =prepare_batch()
with torch.no_grad():
__UpperCAmelCase =model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
__UpperCAmelCase =torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size))
self.assertEqual(output.shape , UpperCAmelCase)
__UpperCAmelCase =torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCAmelCase)
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase))
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(UpperCAmelCase)
__UpperCAmelCase =prepare_batch('''val-batch.pt''')
with torch.no_grad():
__UpperCAmelCase =model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
__UpperCAmelCase =torch.Size((6_4, model.config.context_length, model.config.d_model))
self.assertEqual(output.shape , UpperCAmelCase)
__UpperCAmelCase =torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCAmelCase)
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase))
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(UpperCAmelCase)
__UpperCAmelCase =prepare_batch('''val-batch.pt''')
with torch.no_grad():
__UpperCAmelCase =model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
__UpperCAmelCase =torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length))
self.assertEqual(outputs.sequences.shape , UpperCAmelCase)
__UpperCAmelCase =torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCAmelCase)
__UpperCAmelCase =outputs.sequences.mean(dim=1)
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCAmelCase , rtol=1e-1))
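# The three integration tests above are gated by @slow and need network access to download the
# huggingface/autoformer-tourism-monthly checkpoint and the hf-internal-testing batch files.
# A typical way to run them (illustrative, assuming the usual tests/models/... repository layout):
#
#   RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py -k Integration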
| 132 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase_ = None
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = '▁'
UpperCamelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
UpperCamelCase_ = {
'google/pegasus-xsum': 5_1_2,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token,
            unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
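# Example usage (sketch; downloads the google/pegasus-xsum files referenced in the maps above):
#
#   from transformers import PegasusTokenizerFast
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("PEGASUS uses gap sentence generation for pretraining.").input_ids
#   # the EOS token is appended to every encoded sequence, so ids[-1] == tokenizer.eos_token_id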
| 132 | 1 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: maximum value attainable with the items from `index` onwards."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
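# Example (illustrative): three items with weights [1, 2, 3], values [6, 10, 12] and a capacity of 5
# can yield at most 22 by taking the last two items.
#
#   knapsack([1, 2, 3], [6, 10, 12], 3, 5, 0)  # -> 22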
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
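# With this layout, importing the package stays cheap: the _LazyModule only resolves a submodule
# (and therefore its heavy torch/tf/flax dependencies) the first time one of the names listed in
# _import_structure is actually accessed.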
| 625 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 77 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def a_ ( self : str):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a_ ( self : Dict):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
__UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
__UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : Tuple = tempfile.mkdtemp()
__UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def a_ ( cls : int):
"""simple docstring"""
__UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
__UpperCAmelCase : Union[str, Any] = 1
return cls
def a_ ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids)
__UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , UpperCamelCase_)
__UpperCAmelCase : Tuple = 10
__UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , UpperCamelCase_)
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
__UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_)
@require_torch
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
__UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
__UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt")
__UpperCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt")
__UpperCAmelCase : int = targets["input_ids"]
__UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(UpperCamelCase_) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
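# These MBart tokenizer tests exercise both the slow (SentencePiece) and the fast (tokenizers)
# backends; the integration test class above additionally needs network access to download the
# facebook/mbart-large-en-ro checkpoint.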
| 77 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
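# Example usage (sketch):
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # code path that relies on features introduced in torch 1.12
#   compare_versions("numpy", ">=", "1.20.0")  # the same check for any other installed library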
| 707 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a__ ( A_, A_, A_, A_=False ):
'''simple docstring'''
__magic_name__ = get_mobilenet_va_config(A_ )
# Load 🤗 model
__magic_name__ = MobileNetVaForImageClassification(A_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(A_, A_, A_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__magic_name__ = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, )
__magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" )
__magic_name__ = model(**A_ )
__magic_name__ = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
__magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
__magic_name__ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3], A_, atol=1e-4 )
Path(A_ ).mkdir(exist_ok=A_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A_ )
if push_to_hub:
print("""Pushing to the hub...""" )
__magic_name__ = """google/""" + model_name
image_processor.push_to_hub(A_ )
model.push_to_hub(A_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
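# Illustrative invocation (the script file name and checkpoint path are placeholders; the released
# TensorFlow MobileNetV1 checkpoints ship as mobilenet_v1_<depth>_<size>.ckpt.* files):
#
#   python convert_mobilenet_v1_tf_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224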
| 76 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
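# For example, with max_length=20.0 seconds and sample_rate=16000, clips longer than 320000 samples
# are cut to a random 320000-sample window, while shorter clips are returned unchanged.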
@dataclass
class DataTrainingArguments:
'''simple docstring'''
__lowercase : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__lowercase : str = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__lowercase : str = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__lowercase : str = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__lowercase : str = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__lowercase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__lowercase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__lowercase : float = field(
default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments:
'''simple docstring'''
__lowercase : str = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__lowercase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__lowercase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__lowercase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def UpperCAmelCase_ ( self ) -> List[Any]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" ,__UpperCAmelCase ,)
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def main():
"""simple docstring"""
lowerCAmelCase__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , UpperCamelCase , UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ : Dict = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase )
transformers.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCAmelCase__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowerCAmelCase__ : int = DatasetDict()
lowerCAmelCase__ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCAmelCase__ : Optional[Any] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCAmelCase__ : Optional[Any] = feature_extractor.model_input_names[0]
def train_transforms(UpperCamelCase ):
lowerCAmelCase__ : List[str] = []
for audio in batch[data_args.audio_column_name]:
lowerCAmelCase__ : Dict = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCamelCase )
lowerCAmelCase__ : List[Any] = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
lowerCAmelCase__ : Dict = {model_input_name: inputs.get(UpperCamelCase )}
lowerCAmelCase__ : Any = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCamelCase ):
lowerCAmelCase__ : str = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowerCAmelCase__ : str = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
lowerCAmelCase__ : Tuple = {model_input_name: inputs.get(UpperCamelCase )}
lowerCAmelCase__ : Tuple = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCAmelCase__ : Dict = raw_datasets["""train"""].features[data_args.label_column_name].names
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = {}, {}
for i, label in enumerate(UpperCamelCase ):
lowerCAmelCase__ : List[str] = str(UpperCamelCase )
lowerCAmelCase__ : Tuple = label
# Load the accuracy metric from the datasets package
lowerCAmelCase__ : List[Any] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(UpperCamelCase ):
lowerCAmelCase__ : Optional[Any] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCamelCase , references=eval_pred.label_ids )
lowerCAmelCase__ : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel=UpperCamelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : List[Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ : Union[str, Any] = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ : Any = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )
# Initialize our trainer
lowerCAmelCase__ : int = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=UpperCamelCase , tokenizer=UpperCamelCase , )
# Training
if training_args.do_train:
lowerCAmelCase__ : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ : Dict = last_checkpoint
lowerCAmelCase__ : Dict = trainer.train(resume_from_checkpoint=UpperCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase__ : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCamelCase )
trainer.save_metrics("""eval""" , UpperCamelCase )
# Write model card and (optionally) push to hub
lowerCAmelCase__ : str = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase )
else:
trainer.create_model_card(**UpperCamelCase )
if __name__ == "__main__":
main()
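# Illustrative sketch (not part of the script above): the `random_subsample` helper called in
# `train_transforms` is assumed to crop each waveform to at most `max_length` seconds starting at
# a random offset, so every training example has a bounded length. All names below are hypothetical.
import numpy as np

def random_subsample_sketch(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Return a random crop of `wav` that is at most `max_length` seconds long."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = np.random.randint(0, len(wav) - sample_length)
    return wav[random_offset : random_offset + sample_length]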
| 565 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = TextToVideoSDPipeline
__lowercase : Any = TEXT_TO_IMAGE_PARAMS
__lowercase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowercase : Dict = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def UpperCAmelCase_ ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
lowerCAmelCase__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="""scaled_linear""" ,clip_sample=__UpperCAmelCase ,set_alpha_to_one=__UpperCAmelCase ,)
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
lowerCAmelCase__ : Union[str, Any] = CLIPTextModel(__UpperCAmelCase )
lowerCAmelCase__ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> List[Any]:
if str(__UpperCAmelCase ).startswith("""mps""" ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ : Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCAmelCase_ ( self ) -> List[str]:
lowerCAmelCase__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[int] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = TextToVideoSDPipeline(**__UpperCAmelCase )
lowerCAmelCase__ : str = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ : Any = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = """np"""
lowerCAmelCase__ : int = sd_pipe(**__UpperCAmelCase ).frames
lowerCAmelCase__ : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCAmelCase__ : Dict = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ) -> Optional[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase ,expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase ,expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCAmelCase_ ( self ) -> int:
pass
def UpperCAmelCase_ ( self ) -> List[str]:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
lowerCAmelCase__ : str = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowerCAmelCase__ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ : List[Any] = pipe.to("""cuda""" )
lowerCAmelCase__ : Tuple = """Spiderman is surfing"""
lowerCAmelCase__ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ : str = pipe(__UpperCAmelCase ,generator=__UpperCAmelCase ,num_inference_steps=25 ,output_type="""pt""" ).frames
lowerCAmelCase__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
lowerCAmelCase__ : int = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowerCAmelCase__ : List[Any] = pipe.to("""cuda""" )
lowerCAmelCase__ : List[str] = """Spiderman is surfing"""
lowerCAmelCase__ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(__UpperCAmelCase ,generator=__UpperCAmelCase ,num_inference_steps=2 ,output_type="""pt""" ).frames
lowerCAmelCase__ : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
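# Minimal usage sketch of the pipeline these slow tests exercise, kept as a comment so it does not
# run at import time. `export_to_video` is assumed to be available from `diffusers.utils`; the
# checkpoint id and the scheduler swap come from the tests themselves.
#
#   from diffusers import TextToVideoSDPipeline, DPMSolverMultistepScheduler
#   from diffusers.utils import export_to_video
#
#   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#   pipe = pipe.to("cuda")
#   video_frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
#   export_to_video(video_frames, "spiderman.mp4")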
| 565 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCAmelCase_ : Union[str, Any] = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _lowercase ( UpperCamelCase__ : str, UpperCamelCase__ : Dict, UpperCamelCase__ : Any, UpperCamelCase__ : int, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[Any] ):
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(UpperCamelCase__ ), version.parse(UpperCamelCase__ ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def _lowercase ( UpperCamelCase__ : str, UpperCamelCase__ : Optional[str] = None ):
__A : Tuple = f"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$', UpperCamelCase__ ):
__A ,__A ,__A : Optional[Any] = requirement, None, None
else:
__A : Any = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', UpperCamelCase__ )
if not match:
raise ValueError(
                'requirement needs to be in the pip package format, e.g. package_a==1.23, or package_b>=1.23, but'
f""" got {requirement}""" )
__A ,__A : Optional[Any] = match[0]
__A : Optional[int] = want_full.split(',' ) # there could be multiple requirements
__A : Union[str, Any] = {}
for w in want_range:
__A : str = re.findall(r'^([\s!=<>]{1,2})(.+)', UpperCamelCase__ )
if not match:
raise ValueError(
                    'requirement needs to be in the pip package format, e.g. package_a==1.23, or package_b>=1.23,'
f""" but got {requirement}""" )
__A ,__A : int = match[0]
__A : Optional[Any] = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
__A : Dict = '.'.join([str(UpperCamelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return
# check if any version is installed
try:
__A : Union[str, Any] = importlib.metadata.version(UpperCamelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def _lowercase ( UpperCamelCase__ : List[Any] ):
__A : int = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(UpperCamelCase__, UpperCamelCase__ )
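# Usage sketch: in `transformers` these helpers are exposed as `require_version` and
# `require_version_core` (transformers/utils/versions.py). A caller states a pip-style requirement
# and gets an informative error if the installed package does not satisfy it. Kept as comments so
# the checks do not run at import time.
#
#   from transformers.utils.versions import require_version
#   require_version("datasets>=1.8.0", "To fix: pip install -r requirements.txt")
#   require_version("python>=3.7")       # "python" is special-cased against sys.version_info
#   require_version("numpy>=1.17,<2.0")  # comma-separated range specifiers are supported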
| 540 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
def _lowercase ( ):
__A : Tuple = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path', type=UpperCamelCase__, default='data/dump.txt', help='The path to the data.' )
parser.add_argument('--tokenizer_type', type=UpperCamelCase__, default='bert', choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name', type=UpperCamelCase__, default='bert-base-uncased', help='The tokenizer to use.' )
parser.add_argument('--dump_file', type=UpperCamelCase__, default='data/dump', help='The dump file prefix.' )
__A : Tuple = parser.parse_args()
logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
__A : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
__A : Any = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
__A : List[Any] = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
__A : Any = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__A : List[Any] = tokenizer.special_tokens_map['cls_token'] # `<s>`
__A : Dict = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
__A : List[str] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__A : Any = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
__A : Union[str, Any] = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f"""Loading text from {args.file_path}""" )
with open(args.file_path, 'r', encoding='utf8' ) as fp:
__A : Optional[int] = fp.readlines()
logger.info('Start encoding' )
logger.info(f"""{len(UpperCamelCase__ )} examples to process.""" )
__A : int = []
__A : List[str] = 0
__A : Tuple = 10000
__A : List[str] = time.time()
for text in data:
__A : int = f"""{bos} {text.strip()} {sep}"""
__A : List[Any] = tokenizer.encode(UpperCamelCase__, add_special_tokens=UpperCamelCase__ )
rslt.append(UpperCamelCase__ )
iter += 1
if iter % interval == 0:
__A : Any = time.time()
logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
__A : Any = time.time()
logger.info('Finished binarization' )
logger.info(f"""{len(UpperCamelCase__ )} examples processed.""" )
__A : List[str] = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
__A : str = tokenizer.vocab_size
if vocab_size < (1 << 16):
__A : Optional[int] = [np.uintaa(UpperCamelCase__ ) for d in rslt]
else:
__A : int = [np.intaa(UpperCamelCase__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"""Dump to {dp_file}""" )
with open(UpperCamelCase__, 'wb' ) as handle:
pickle.dump(rslt_, UpperCamelCase__, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
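# Illustrative sketch of reading the dump back: the file written above is a plain pickle holding a
# list of numpy arrays (16-bit unsigned integers when the vocabulary fits in 16 bits, a wider
# integer type otherwise). With the default arguments the path is "data/dump.bert-base-uncased.pickle".
# Kept as comments so it does not run at import time.
#
#   import pickle
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#       token_ids = pickle.load(fp)   # one array of token ids per input line
#   print(len(token_ids), token_ids[0].dtype)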
| 540 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def snake_case__ ( *snake_case__, **snake_case__ ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
lowercase_ : Dict = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", )
lowercase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : int = image_classifier(snake_case__, candidate_labels=["""a""", """b""", """c"""] )
        # The floating-point scores are so close that we run into floating-point error, so the
        # order of the labels is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(snake_case__ ), [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
], )
lowercase_ : List[str] = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ), [
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
], )
@require_tf
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : Optional[int] = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""", framework="""tf""" )
lowercase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : str = image_classifier(snake_case__, candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(snake_case__ ), [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}], )
lowercase_ : Optional[Any] = image_classifier([image] * 5, candidate_labels=["""A""", """B""", """C"""], batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ), [
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
[
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
{"""score""": 0.333, """label""": ANY(snake_case__ )},
],
], )
@slow
@require_torch
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
lowercase_ : Dict = pipeline(
task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", )
# This is an image of 2 cats with remotes and no planes
lowercase_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Any = image_classifier(snake_case__, candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(snake_case__ ), [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
], )
lowercase_ : Optional[Any] = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ), [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5, )
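    # Minimal usage sketch of the user-facing API that these slow tests exercise, kept as a
    # comment so it does not run at import time; the checkpoint and candidate labels are taken
    # from the test above.
    #
    #   from transformers import pipeline
    #   from PIL import Image
    #   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    #   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    #   classifier(image, candidate_labels=["cat", "plane", "remote"])
    #   # -> a list of {"score": ..., "label": ...} dicts sorted by descending score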
@slow
@require_tf
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : Union[str, Any] = pipeline(
task="""zero-shot-image-classification""", model="""openai/clip-vit-base-patch32""", framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
lowercase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowercase_ : Optional[int] = image_classifier(snake_case__, candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(snake_case__ ), [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
], )
lowercase_ : int = image_classifier([image] * 5, candidate_labels=["""cat""", """plane""", """remote"""], batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ), [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
            * 5, )
| 458 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__a : Optional[int] = StableUnCLIPPipeline
__a : int = TEXT_TO_IMAGE_PARAMS
__a : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__a : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__a : Tuple = False
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : int = 32
lowercase_ : Tuple = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowercase_ : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=snake_case__, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, ) )
torch.manual_seed(0 )
lowercase_ : Tuple = PriorTransformer(
num_attention_heads=2, attention_head_dim=12, embedding_dim=snake_case__, num_layers=1, )
torch.manual_seed(0 )
lowercase_ : Optional[Any] = DDPMScheduler(
variance_type="""fixed_small_log""", prediction_type="""sample""", num_train_timesteps=10_00, clip_sample=snake_case__, clip_sample_range=5.0, beta_schedule="""squaredcos_cap_v2""", )
# regular denoising components
torch.manual_seed(0 )
lowercase_ : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=snake_case__ )
lowercase_ : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowercase_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowercase_ : List[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=snake_case__, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, ) )
torch.manual_seed(0 )
lowercase_ : Optional[int] = UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="""projection""", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=snake_case__, layers_per_block=1, upcast_attention=snake_case__, use_linear_projection=snake_case__, )
torch.manual_seed(0 )
lowercase_ : Dict = DDIMScheduler(
beta_schedule="""scaled_linear""", beta_start=0.00085, beta_end=0.012, prediction_type="""v_prediction""", set_alpha_to_one=snake_case__, steps_offset=1, )
torch.manual_seed(0 )
lowercase_ : str = AutoencoderKL()
lowercase_ : str = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def snake_case__ ( self, snake_case__, snake_case__=0 ) -> str:
"""simple docstring"""
if str(snake_case__ ).startswith("""mps""" ):
lowercase_ : Tuple = torch.manual_seed(snake_case__ )
else:
lowercase_ : int = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase_ : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
lowercase_ : int = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=snake_case__ )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
lowercase_ : List[str] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=snake_case__ )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ) -> Any:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
lowercase_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
lowercase_ : List[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
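        # Descriptive note on the two memory savers above: `enable_attention_slicing` computes the
        # attention maps in smaller slices instead of all heads at once, lowering peak activation
        # memory, and `enable_sequential_cpu_offload` keeps submodules on the CPU and moves them to
        # the GPU one at a time during the forward pass, trading speed for a much smaller VRAM
        # footprint.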
lowercase_ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ : Dict = pipe("""anime turle""", generator=snake_case__, output_type="""np""" )
lowercase_ : str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__, snake_case__ )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ : str = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""", torch_dtype=torch.floataa )
lowercase_ : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : List[Any] = pipe(
"""anime turtle""", prior_num_inference_steps=2, num_inference_steps=2, output_type="""np""", )
lowercase_ : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 458 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class _lowercase ( a , unittest.TestCase ):
_UpperCamelCase = XLMProphetNetTokenizer
_UpperCamelCase = False
_UpperCamelCase = True
def snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A : Dict = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
A : Union[str, Any] = '''[PAD]'''
A : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self ):
A : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_UpperCAmelCase ) , 1_012 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def snake_case ( self ):
A : Dict = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
A : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def snake_case ( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def snake_case ( self ):
A : Union[str, Any] = '''Hello World!'''
A : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case ( self ):
# fmt: off
A : List[Any] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 537 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE : List[str] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ) -> str:
if attention_mask is None:
_lowercase : Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : Optional[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=99, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=32, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=0, lowerCamelCase=0.0_2, ) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = parent
_lowercase : Union[str, Any] = batch_size
_lowercase : List[Any] = seq_length
_lowercase : Optional[int] = is_training
_lowercase : List[str] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : Union[str, Any] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Any = attention_probs_dropout_prob
_lowercase : Optional[int] = max_position_embeddings
_lowercase : Union[str, Any] = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : Optional[int] = initializer_range
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
_lowercase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.intaa)), -1)
_lowercase : Union[str, Any] = shift_tokens_right(lowerCamelCase, 1, 2)
_lowercase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=lowerCamelCase, )
_lowercase : Any = prepare_blenderbot_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase)
return config, inputs_dict
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = 20
_lowercase : Dict = model_class_name(lowerCamelCase)
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'])
_lowercase , _lowercase : Optional[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Dict = model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase)
_lowercase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
_lowercase : Optional[Any] = model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
_lowercase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
_lowercase : Optional[int] = model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=lowerCamelCase, )
_lowercase : int = model.decode(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : str = 20
_lowercase : Tuple = model_class_name(lowerCamelCase)
_lowercase : Any = model.encode(inputs_dict['input_ids'])
_lowercase , _lowercase : List[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
], axis=-1, )
_lowercase : Dict = model.init_cache(decoder_input_ids.shape[0], lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1], lowerCamelCase, decoder_attention_mask=lowerCamelCase, past_key_values=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
_lowercase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
_lowercase : List[str] = model.decode(
decoder_input_ids[:, -1:], lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=lowerCamelCase, decoder_position_ids=lowerCamelCase, )
_lowercase : Tuple = model.decode(lowerCamelCase, lowerCamelCase, decoder_attention_mask=lowerCamelCase)
_lowercase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3, msg=F'''Max diff is {diff}''')
@require_flax
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Optional[int] = 99
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
], dtype=np.intaa, )
_lowercase : Optional[Any] = input_ids.shape[0]
_lowercase : int = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase , _lowercase : Any = self._get_config_and_data()
_lowercase : Tuple = FlaxBlenderbotForConditionalGeneration(lowerCamelCase)
_lowercase : List[str] = lm_model(input_ids=lowerCamelCase)
_lowercase : Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape, lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = BlenderbotConfig(
vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
_lowercase : str = FlaxBlenderbotForConditionalGeneration(lowerCamelCase)
_lowercase : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.intaa)
_lowercase : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.intaa)
_lowercase : Tuple = lm_model(input_ids=lowerCamelCase, decoder_input_ids=lowerCamelCase)
_lowercase : List[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape, lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.intaa)
_lowercase : Dict = shift_tokens_right(lowerCamelCase, 1, 2)
_lowercase : str = np.equal(lowerCamelCase, 1).astype(np.floataa).sum()
_lowercase : Union[str, Any] = np.equal(lowerCamelCase, 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(lowerCamelCase, n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class _lowerCamelCase( _a, unittest.TestCase, _a ):
lowercase_ : Any = True
lowercase_ : Optional[int] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ : Dict = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = FlaxBlenderbotModelTester(self)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase , _lowercase : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowercase : Optional[Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model_class(lowerCamelCase)
@jax.jit
def encode_jitted(lowerCamelCase, lowerCamelCase=None, **lowerCamelCase):
return model.encode(input_ids=lowerCamelCase, attention_mask=lowerCamelCase)
with self.subTest('JIT Enabled'):
_lowercase : List[str] = encode_jitted(**lowerCamelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_lowercase : Union[str, Any] = encode_jitted(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase):
self.assertEqual(jitted_output.shape, output.shape)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowercase : int = model_class(lowerCamelCase)
_lowercase : Union[str, Any] = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
_lowercase : Optional[int] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase, lowerCamelCase, lowerCamelCase):
return model.decode(
decoder_input_ids=lowerCamelCase, decoder_attention_mask=lowerCamelCase, encoder_outputs=lowerCamelCase, )
with self.subTest('JIT Enabled'):
_lowercase : Union[str, Any] = decode_jitted(**lowerCamelCase).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
_lowercase : Optional[int] = decode_jitted(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
for jitted_output, output in zip(lowerCamelCase, lowerCamelCase):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_lowercase : Optional[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1)) * model.config.eos_token_id
_lowercase : Optional[Any] = model(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
_lowercase : Any = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
_lowercase : Any = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=lowerCamelCase)
_lowercase : Dict = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
_lowercase : int = ['Sam']
_lowercase : str = tokenizer(lowerCamelCase, return_tensors='jax')
_lowercase : Optional[Any] = model.generate(**lowerCamelCase, **lowerCamelCase)
_lowercase : Any = 'Sam is a great name. It means "sun" in Gaelic.'
_lowercase : Tuple = tokenizer.batch_decode(lowerCamelCase, **lowerCamelCase)
assert generated_txt[0].strip() == tgt_text
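# Illustrative numpy sketch (a hypothetical helper, not the library function) of what the
# `shift_tokens_right` call checked in the test above is expected to do: shift every sequence one
# position to the right, drop the last token, and place the decoder start token in column 0.
import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label positions marked -100 are replaced by the pad token
    return np.where(shifted == -100, pad_token_id, shifted)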
| 89 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class SCREAMING_SNAKE_CASE__ :
def __init__( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ''
UpperCamelCase = ''
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = 256
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
UpperCamelCase = cva.imread(A_ , 0 )
UpperCamelCase = copy.deepcopy(self.img )
UpperCamelCase , UpperCamelCase , UpperCamelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
UpperCamelCase = np.sum(A_ )
for i in range(len(A_ ) ):
UpperCamelCase = x[i] / self.k
self.sk += prk
UpperCamelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCamelCase = int(last % last )
UpperCamelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(A_ )
UpperCamelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
lowerCAmelCase : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
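# Illustrative numpy sketch of the same idea, independent of the class above (hypothetical helper):
# histogram equalization maps every grey level through the scaled cumulative distribution function
# of the image histogram, which is what the stretch loop computes level by level.
import numpy as np

def equalize_histogram_sketch(img: np.ndarray, levels: int = 256) -> np.ndarray:
    """Equalize an 8-bit greyscale image."""
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / hist.sum()                      # cumulative distribution in [0, 1]
    lut = np.round((levels - 1) * cdf).astype(np.uint8)   # new grey level for each old level
    return lut[img]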
| 3 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _SCREAMING_SNAKE_CASE (tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Union[str, Any] , UpperCamelCase : float , UpperCamelCase : Callable , UpperCamelCase : int , UpperCamelCase : float = 1.0 , UpperCamelCase : str = None , )->Optional[int]:
super().__init__()
__SCREAMING_SNAKE_CASE : Optional[Any] = initial_learning_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = warmup_steps
__SCREAMING_SNAKE_CASE : Optional[int] = power
__SCREAMING_SNAKE_CASE : List[Any] = decay_schedule_fn
__SCREAMING_SNAKE_CASE : int = name
def __call__( self : List[Any] , UpperCamelCase : Any )->Optional[int]:
with tf.name_scope(self.name or "WarmUp" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.cast(UpperCamelCase , tf.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = tf.cast(self.warmup_steps , tf.floataa )
__SCREAMING_SNAKE_CASE : Any = global_step_float / warmup_steps_float
__SCREAMING_SNAKE_CASE : Tuple = self.initial_learning_rate * tf.math.pow(UpperCamelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase , )
def __snake_case ( self : str )->List[str]:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _lowerCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = 0.9 , __lowerCamelCase : float = 0.999 , __lowerCamelCase : float = 1E-8 , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : Optional[float] = None , __lowerCamelCase : float = 0.0 , __lowerCamelCase : float = 1.0 , __lowerCamelCase : Optional[List[str]] = None , ):
__SCREAMING_SNAKE_CASE : Tuple = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCamelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCamelCase , )
if num_warmup_steps:
__SCREAMING_SNAKE_CASE : List[str] = WarmUp(
initial_learning_rate=__lowerCamelCase , decay_schedule_fn=__lowerCamelCase , warmup_steps=__lowerCamelCase , )
if weight_decay_rate > 0.0:
__SCREAMING_SNAKE_CASE : int = AdamWeightDecay(
learning_rate=__lowerCamelCase , weight_decay_rate=__lowerCamelCase , beta_a=__lowerCamelCase , beta_a=__lowerCamelCase , epsilon=__lowerCamelCase , clipnorm=__lowerCamelCase , global_clipnorm=__lowerCamelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__lowerCamelCase , )
else:
__SCREAMING_SNAKE_CASE : Any = tf.keras.optimizers.Adam(
learning_rate=__lowerCamelCase , beta_a=__lowerCamelCase , beta_a=__lowerCamelCase , epsilon=__lowerCamelCase , clipnorm=__lowerCamelCase , global_clipnorm=__lowerCamelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
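# Usage sketch (hedged: assuming the factory above corresponds to transformers' create_optimizer,
# whose first three positional arguments are init_lr, num_train_steps and num_warmup_steps):
# optimizer, lr_schedule = create_optimizer(3e-5, 10_000, 500, weight_decay_rate=0.01)
# model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")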
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
def __init__( self : Optional[Any] , UpperCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , UpperCamelCase : float = 0.9 , UpperCamelCase : float = 0.9_9_9 , UpperCamelCase : float = 1E-7 , UpperCamelCase : bool = False , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : str = "AdamWeightDecay" , **UpperCamelCase : int , )->List[Any]:
super().__init__(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = weight_decay_rate
__SCREAMING_SNAKE_CASE : Optional[Any] = include_in_weight_decay
__SCREAMING_SNAKE_CASE : Any = exclude_from_weight_decay
@classmethod
def __snake_case ( cls : Any , UpperCamelCase : Optional[int] )->List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = {"WarmUp": WarmUp}
return super(UpperCamelCase , cls ).from_config(UpperCamelCase , custom_objects=UpperCamelCase )
def __snake_case ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : int )->int:
super(UpperCamelCase , self )._prepare_local(UpperCamelCase , UpperCamelCase , UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def __snake_case ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Tuple )->Any:
__SCREAMING_SNAKE_CASE : List[str] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def __snake_case ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=None , **UpperCamelCase : str )->List[Any]:
__SCREAMING_SNAKE_CASE : int = list(zip(*UpperCamelCase ) )
return super(UpperCamelCase , self ).apply_gradients(zip(UpperCamelCase , UpperCamelCase ) , name=UpperCamelCase , **UpperCamelCase )
def __snake_case ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any )->Optional[Any]:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__SCREAMING_SNAKE_CASE : Any = apply_state or {}
__SCREAMING_SNAKE_CASE : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__SCREAMING_SNAKE_CASE : Any = self._fallback_apply_state(UpperCamelCase , UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __snake_case ( self : str , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int=None )->List[Any]:
__SCREAMING_SNAKE_CASE : str = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase )
__SCREAMING_SNAKE_CASE : str = self._decay_weights_op(UpperCamelCase , UpperCamelCase , UpperCamelCase )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase , self )._resource_apply_dense(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
def __snake_case ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=None )->List[Any]:
__SCREAMING_SNAKE_CASE : Any = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = self._decay_weights_op(UpperCamelCase , UpperCamelCase , UpperCamelCase )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase , self )._resource_apply_sparse(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
def __snake_case ( self : Tuple )->Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def __snake_case ( self : Dict , UpperCamelCase : Optional[Any] )->Tuple:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase , UpperCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase , UpperCamelCase ) is not None:
return False
return True
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
def __init__( self : List[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : Dict = None
@property
def __snake_case ( self : str )->str:
if self._accum_steps is None:
__SCREAMING_SNAKE_CASE : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __snake_case ( self : Any )->int:
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str , UpperCamelCase : int )->Optional[Any]:
if not self._gradients:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase ) , trainable=UpperCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase )}""" )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase )
self._accum_steps.assign_add(1 )
def __snake_case ( self : Any )->Any:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase ) )
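# Usage sketch (hedged: upstream this class is GradientAccumulator and the zero-argument
# method above is its reset(); the loop variable below is hypothetical):
# accumulator = GradientAccumulator()
# for grads in per_micro_batch_gradients: # one list of gradients per micro-batch
# accumulator(grads) # accumulates element-wise into tf.Variables
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset() # zero the buffers and the step counter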
| 710 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_lowerCamelCase = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
_lowerCamelCase = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_lowerCamelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_lowerCamelCase = sorted(arg_to_scheduler.keys())
_lowerCamelCase = """{""" + """, """.join(arg_to_scheduler_choices) + """}"""
class _SCREAMING_SNAKE_CASE (pl.LightningModule ):
def __init__( self : Dict , UpperCamelCase : argparse.Namespace , UpperCamelCase : str=None , UpperCamelCase : Tuple="base" , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : int , )->Dict:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : List[Any] = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE : Dict = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase , **UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : PretrainedConfig = config
__SCREAMING_SNAKE_CASE : Any = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase , UpperCamelCase ):
assert hasattr(self.config , UpperCamelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , UpperCamelCase , getattr(self.hparams , UpperCamelCase ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : PreTrainedTokenizer = tokenizer
__SCREAMING_SNAKE_CASE : str = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = model
def __snake_case ( self : Dict , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] )->Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_type.from_pretrained(*UpperCamelCase , **UpperCamelCase )
def __snake_case ( self : Optional[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : Tuple = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE : List[str] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def __snake_case ( self : int )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.model
__SCREAMING_SNAKE_CASE : Any = ["bias", "LayerNorm.weight"]
__SCREAMING_SNAKE_CASE : Optional[int] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE : Dict = Adafactor(
UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase , relative_step=UpperCamelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = AdamW(
UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE : List[Any] = optimizer
__SCREAMING_SNAKE_CASE : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __snake_case ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Any )->Optional[Any]:
return self.validation_step(UpperCamelCase , UpperCamelCase )
def __snake_case ( self : List[Any] , UpperCamelCase : Tuple )->List[Any]:
return self.validation_end(UpperCamelCase )
def __snake_case ( self : str )->int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE : List[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
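# Illustrative numbers (not from the source): train_batch_size=32, accumulate_grad_batches=2
# and gpus=2 give an effective batch size of 128, so 10_000 examples over max_epochs=3
# yield an estimate of roughly 10_000 / 128 * 3 ≈ 234 optimizer steps.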
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Tuple )->Optional[Any]:
if stage == "test":
__SCREAMING_SNAKE_CASE : List[str] = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = len(self.train_dataloader().dataset )
def __snake_case ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : bool = False )->List[Any]:
raise NotImplementedError("You must implement this for your task" )
def __snake_case ( self : List[Any] )->Union[str, Any]:
return self.train_loader
def __snake_case ( self : Tuple )->Union[str, Any]:
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def __snake_case ( self : Optional[Any] )->Any:
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def __snake_case ( self : int , UpperCamelCase : List[str] )->int:
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
UpperCamelCase , list(filter(UpperCamelCase , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Dict[str, Any] )->None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.output_dir.joinpath("best_tfmr" )
__SCREAMING_SNAKE_CASE : List[Any] = self.step_count
self.model.save_pretrained(UpperCamelCase )
self.tokenizer.save_pretrained(UpperCamelCase )
@staticmethod
def __snake_case ( UpperCamelCase : Any , UpperCamelCase : Optional[Any] )->List[str]:
parser.add_argument(
"--model_name_or_path" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=UpperCamelCase , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=UpperCamelCase , type=UpperCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(UpperCamelCase ).parent / "test_run" / "cache" ) , type=UpperCamelCase , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=UpperCamelCase , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=UpperCamelCase , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=UpperCamelCase , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=UpperCamelCase , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5E-5 , type=UpperCamelCase , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=UpperCamelCase , metavar=UpperCamelCase , type=UpperCamelCase , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=UpperCamelCase , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=UpperCamelCase )
parser.add_argument("--train_batch_size" , default=3_2 , type=UpperCamelCase )
parser.add_argument("--eval_batch_size" , default=3_2 , type=UpperCamelCase )
parser.add_argument("--adafactor" , action="store_true" )
class _SCREAMING_SNAKE_CASE (pl.Callback ):
def __snake_case ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple )->Tuple:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _SCREAMING_SNAKE_CASE (pl.Callback ):
def __snake_case ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] )->Tuple:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase )
class _SCREAMING_SNAKE_CASE (pl.Callback ):
def __snake_case ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Dict )->List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.lr_schedulers[0]["scheduler"]
__SCREAMING_SNAKE_CASE : Optional[int] = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase )
def __snake_case ( self : Tuple , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule )->Union[str, Any]:
rank_zero_info("***** Validation results *****" )
__SCREAMING_SNAKE_CASE : Optional[Any] = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(UpperCamelCase , str(metrics[key] ) ) )
def __snake_case ( self : str , UpperCamelCase : pl.Trainer , UpperCamelCase : pl.LightningModule )->Optional[Any]:
rank_zero_info("***** Test results *****" )
__SCREAMING_SNAKE_CASE : List[Any] = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
with open(UpperCamelCase , "w" ) as writer:
for key in sorted(UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(UpperCamelCase , str(metrics[key] ) ) )
writer.write("{} = {}\n".format(UpperCamelCase , str(metrics[key] ) ) )
def _lowerCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
parser.add_argument(
"--output_dir" , default=str(Path(__lowerCamelCase ).parent / "test_run" / "model_checkpoints" ) , type=__lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=__lowerCamelCase , default="O2" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=__lowerCamelCase )
parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=__lowerCamelCase , help="Max gradient norm" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=__lowerCamelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--seed" , type=__lowerCamelCase , default=42 , help="random seed for initialization" )
parser.add_argument(
"--data_dir" , default=str(Path(__lowerCamelCase ).parent / "test_run" / "dummy-train-data" ) , type=__lowerCamelCase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def _lowerCAmelCase ( __lowerCamelCase : BaseTransformer , __lowerCamelCase : argparse.Namespace , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=[] , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Any , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__SCREAMING_SNAKE_CASE : List[str] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__lowerCamelCase )
# add custom checkpoints
if checkpoint_callback is None:
__SCREAMING_SNAKE_CASE : Tuple = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__lowerCamelCase )
if logging_callback is None:
__SCREAMING_SNAKE_CASE : List[str] = LoggingCallback()
__SCREAMING_SNAKE_CASE : str = {}
if args.fpaa:
__SCREAMING_SNAKE_CASE : Any = 16
if args.gpus > 1:
__SCREAMING_SNAKE_CASE : List[Any] = "auto"
__SCREAMING_SNAKE_CASE : List[Any] = "ddp"
__SCREAMING_SNAKE_CASE : List[str] = args.accumulate_grad_batches
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Optional[int] = "auto"
__SCREAMING_SNAKE_CASE : Optional[int] = pl.Trainer.from_argparse_args(
__lowerCamelCase , weights_summary=__lowerCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__lowerCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__lowerCamelCase , )
if args.do_train:
trainer.fit(__lowerCamelCase )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer
| 447 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _UpperCamelCase (a_ ):
snake_case_ = """data2vec-audio"""
def __init__( self , __UpperCamelCase=3_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1e-5 , __UpperCamelCase="gelu" , __UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCamelCase=False , __UpperCamelCase=1_6 , __UpperCamelCase=1_9 , __UpperCamelCase=5 , __UpperCamelCase=0.0_5 , __UpperCamelCase=1_0 , __UpperCamelCase=2 , __UpperCamelCase=0.0 , __UpperCamelCase=1_0 , __UpperCamelCase=0 , __UpperCamelCase="sum" , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=2_5_6 , __UpperCamelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __UpperCamelCase=(5, 3, 3, 1, 1) , __UpperCamelCase=(1, 2, 3, 1, 1) , __UpperCamelCase=5_1_2 , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=2 , __UpperCamelCase=False , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=None , **__UpperCamelCase , )-> str:
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = conv_pos_kernel_size
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = vocab_size
__lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# adapter
__lowerCAmelCase = add_adapter
__lowerCAmelCase = adapter_kernel_size
__lowerCAmelCase = adapter_stride
__lowerCAmelCase = num_adapter_layers
__lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = list(__UpperCamelCase )
__lowerCAmelCase = xvector_output_dim
@property
def __UpperCAmelCase ( self )-> Tuple:
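# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this evaluates to 5 * 2**6 = 320,
# i.e. the feature extractor downsamples the raw waveform by a factor of 320.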
return math.prod(self.conv_stride )
| 367 |
from __future__ import annotations
def __lowerCAmelCase ( __snake_case ):
__lowerCAmelCase = len(__snake_case )
# We need to create a solution object to save the path.
__lowerCAmelCase = [[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
__lowerCAmelCase = run_maze(__snake_case , 0 , 0 , __snake_case )
if solved:
print("\n".join(str(__snake_case ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
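# Hedged worked example (describing the algorithm's intent; the upstream version writes the
# marks into solutions[i][j]): for maze = [[0, 1], [0, 0]], where 0 is a free cell and 1 is
# blocked, the solver marks the path [[1, 0], [1, 1]] from the top-left to the bottom-right.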
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case , __snake_case ):
__lowerCAmelCase = len(__snake_case )
# Final check point.
if i == j == (size - 1):
__lowerCAmelCase = 1
return True
__lowerCAmelCase = (not i < 0) and (not j < 0) # Check lower bounds
__lowerCAmelCase = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and blocked points.
__lowerCAmelCase = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__lowerCAmelCase = 1
# check for directions
if (
run_maze(__snake_case , i + 1 , __snake_case , __snake_case )
or run_maze(__snake_case , __snake_case , j + 1 , __snake_case )
or run_maze(__snake_case , i - 1 , __snake_case , __snake_case )
or run_maze(__snake_case , __snake_case , j - 1 , __snake_case )
):
return True
__lowerCAmelCase = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 | 1 |
"""simple docstring"""
from __future__ import annotations
a_ : int = '''#'''
class __lowercase:
'''simple docstring'''
def __init__( self ):
__lowerCamelCase : dict = {}
def snake_case_ ( self , __a ):
__lowerCamelCase : Union[str, Any] = self._trie
for char in text:
if char not in trie:
__lowerCamelCase : Any = {}
__lowerCamelCase : Any = trie[char]
__lowerCamelCase : Union[str, Any] = True
def snake_case_ ( self , __a ):
__lowerCamelCase : Any = self._trie
for char in prefix:
if char in trie:
__lowerCamelCase : Optional[Any] = trie[char]
else:
return []
return self._elements(__a )
def snake_case_ ( self , __a ):
__lowerCamelCase : Union[str, Any] = []
for c, v in d.items():
__lowerCamelCase : Optional[int] = [' '] if c == END else [(c + s) for s in self._elements(__a )]
result.extend(__a )
return tuple(__a )
a_ : Any = Trie()
a_ : Dict = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def UpperCAmelCase ( A__: str ) -> tuple:
__lowerCamelCase : int = trie.find_word(A__ )
return tuple(string + word for word in suffixes )
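# In the upstream version, autocomplete_using_trie("de") returns
# ("depart ", "detergent ", "deer ", "deal "); the trailing space comes from the END marker.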
def UpperCAmelCase ( ) -> None:
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ : Optional[int] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.0_1),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class __lowercase( unittest.TestCase ):
'''simple docstring'''
@classmethod
def snake_case_ ( cls ):
__lowerCamelCase : Tuple = TOKEN
HfFolder.save_token(__a )
@classmethod
def snake_case_ ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def snake_case_ ( self ):
__lowerCamelCase : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCamelCase : Any = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , repo_id='test-config' , push_to_hub=__a , use_auth_token=self._token )
__lowerCamelCase : Any = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def snake_case_ ( self ):
__lowerCamelCase : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCamelCase : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id='valid_org/test-config-org' , push_to_hub=__a , use_auth_token=self._token )
__lowerCamelCase : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def snake_case_ ( self ):
CustomConfig.register_for_auto_class()
__lowerCamelCase : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=__a )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class __lowercase( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
__lowerCamelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCamelCase : List[str] = c.n_embd + 1 # int
__lowerCamelCase : Dict = c.resid_pdrop + 1.0 # float
__lowerCamelCase : int = not c.scale_attn_weights # bool
__lowerCamelCase : Optional[int] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__a , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__a , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__a , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__a , c.summary_type , 'mismatch for key: summary_type' )
def snake_case_ ( self ):
__lowerCamelCase : Tuple = PretrainedConfig()
__lowerCamelCase : Dict = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__a , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCamelCase : int = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )]
if len(__a ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {", ".join(__a )}.''' )
def snake_case_ ( self ):
with self.assertRaises(__a ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__a )
def snake_case_ ( self ):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : List[str] = mock.Mock()
__lowerCamelCase : Tuple = 500
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Optional[Any] = HTTPError
__lowerCamelCase : str = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__a ) as mock_head:
__lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self ):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def snake_case_ ( self ):
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCamelCase : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__a )
__lowerCamelCase : Optional[int] = 2
json.dump(configuration.to_dict() , open(os.path.join(__a , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase : Any = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase : Any = ['config.42.0.0.json']
__lowerCamelCase : Tuple = 768
configuration.save_pretrained(__a )
shutil.move(os.path.join(__a , 'config.4.0.0.json' ) , os.path.join(__a , 'config.42.0.0.json' ) )
__lowerCamelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertEqual(new_configuration.hidden_size , 768 )
def snake_case_ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCamelCase : List[str] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCamelCase : Tuple = 'v4.0.0'
__lowerCamelCase , __lowerCamelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
__a , return_unused_kwargs=__a )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__a , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCamelCase : Union[str, Any] = 'v3.0.0'
__lowerCamelCase : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(__a )
self.assertEqual(old_configuration.hidden_size , 768 )
| 263 | 1 |
"""simple docstring"""
import os
import sys
import unittest
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A = os.path.join(git_repo_path, "src", "diffusers")
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = find_backend(''' if not is_torch_available():''' )
self.assertEqual(_UpperCAmelCase , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
lowercase__: int = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(_UpperCAmelCase , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
lowercase__: Optional[int] = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(_UpperCAmelCase , '''torch_and_transformers_and_onnx''' )
def _snake_case ( self ):
lowercase__: Tuple = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , _UpperCAmelCase )
self.assertIn('''torch_and_transformers''' , _UpperCAmelCase )
self.assertIn('''flax_and_transformers''' , _UpperCAmelCase )
self.assertIn('''torch_and_transformers_and_onnx''' , _UpperCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def _snake_case ( self ):
lowercase__: Optional[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , '''\nCONSTANT = None\n''' )
lowercase__: Tuple = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
lowercase__: Dict = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
lowercase__: Union[str, Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
lowercase__: str = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _UpperCAmelCase )
| 586 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = "swin2sr"
_UpperCAmelCase :Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _UpperCAmelCase=64 , _UpperCAmelCase=1 , _UpperCAmelCase=3 , _UpperCAmelCase=180 , _UpperCAmelCase=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase=[6, 6, 6, 6, 6, 6] , _UpperCAmelCase=8 , _UpperCAmelCase=2.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=2 , _UpperCAmelCase=1.0 , _UpperCAmelCase="1conv" , _UpperCAmelCase="pixelshuffle" , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = image_size
lowercase__: List[Any] = patch_size
lowercase__: Union[str, Any] = num_channels
lowercase__: List[str] = embed_dim
lowercase__: Optional[int] = depths
lowercase__: Union[str, Any] = len(_UpperCAmelCase )
lowercase__: Union[str, Any] = num_heads
lowercase__: str = window_size
lowercase__: List[Any] = mlp_ratio
lowercase__: List[Any] = qkv_bias
lowercase__: List[str] = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Any = drop_path_rate
lowercase__: str = hidden_act
lowercase__: List[str] = use_absolute_embeddings
lowercase__: Tuple = layer_norm_eps
lowercase__: Optional[int] = initializer_range
lowercase__: Tuple = upscale
lowercase__: Optional[int] = img_range
lowercase__: List[Any] = resi_connection
lowercase__: Union[str, Any] = upsampler
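# Usage sketch (hedged: assuming the class above maps to transformers' Swin2SRConfig):
# config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle") # defaults: embed_dim=180, six stages of depth 6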
| 586 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
if "resnet-50" in model_name:
_lowerCAmelCase : int = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
_lowerCAmelCase : Optional[Any] = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
_lowerCAmelCase : str = DetrConfig(use_timm_backbone=lowerCAmelCase__ , backbone_config=lowerCAmelCase__ )
# set label attributes
_lowerCAmelCase : str = "panoptic" in model_name
if is_panoptic:
_lowerCAmelCase : List[str] = 2_50
else:
_lowerCAmelCase : Tuple = 91
_lowerCAmelCase : List[Any] = "huggingface/label-files"
_lowerCAmelCase : int = "coco-detection-id2label.json"
_lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : Any = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
_lowerCAmelCase : int = idalabel
_lowerCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : Tuple = state_dict.pop(lowerCAmelCase__ )
_lowerCAmelCase : List[str] = val
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ""
if is_panoptic:
_lowerCAmelCase : List[Any] = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
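# The fused in_proj_weight has shape (3 * 256, 256) for DETR's hidden size of 256; the row
# blocks [0:256], [256:512] and [512:768] hold the query, key and value projections, hence
# the slicing below.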
_lowerCAmelCase : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:2_56, :]
_lowerCAmelCase : int = in_proj_bias[:2_56]
_lowerCAmelCase : Dict = in_proj_weight[2_56:5_12, :]
_lowerCAmelCase : Tuple = in_proj_bias[2_56:5_12]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[-2_56:, :]
_lowerCAmelCase : Any = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase : Union[str, Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Optional[Any] = in_proj_weight[:2_56, :]
_lowerCAmelCase : str = in_proj_bias[:2_56]
_lowerCAmelCase : Optional[int] = in_proj_weight[2_56:5_12, :]
_lowerCAmelCase : List[Any] = in_proj_bias[2_56:5_12]
_lowerCAmelCase : Tuple = in_proj_weight[-2_56:, :]
_lowerCAmelCase : Dict = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
_lowerCAmelCase : int = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_lowerCAmelCase : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCAmelCase : Optional[Any] = in_proj_weight_cross_attn[:2_56, :]
_lowerCAmelCase : Optional[int] = in_proj_bias_cross_attn[:2_56]
_lowerCAmelCase : Optional[Any] = in_proj_weight_cross_attn[2_56:5_12, :]
_lowerCAmelCase : Tuple = in_proj_bias_cross_attn[2_56:5_12]
_lowerCAmelCase : int = in_proj_weight_cross_attn[-2_56:, :]
_lowerCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[-2_56:]
def UpperCamelCase_ ( ):
"""simple docstring"""
_lowerCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase : List[Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=False ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase : Tuple = get_detr_config(lowerCAmelCase__ )
# load original model from torch hub
_lowerCAmelCase : str = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f"""Converting model {model_name}...""" )
_lowerCAmelCase : Union[str, Any] = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=lowerCAmelCase__ ).eval()
_lowerCAmelCase : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCAmelCase__ ):
if is_panoptic:
_lowerCAmelCase : Dict = "detr." + src
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCAmelCase__ , is_panoptic=lowerCAmelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCAmelCase : Tuple = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_lowerCAmelCase : Optional[int] = state_dict.pop(lowerCAmelCase__ )
_lowerCAmelCase : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCAmelCase : Optional[int] = state_dict.pop(lowerCAmelCase__ )
_lowerCAmelCase : int = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_lowerCAmelCase : Any = state_dict.pop(lowerCAmelCase__ )
_lowerCAmelCase : Optional[Any] = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_lowerCAmelCase : Optional[int] = state_dict.pop(lowerCAmelCase__ )
_lowerCAmelCase : Dict = val
# finally, create HuggingFace model and load state dict
_lowerCAmelCase : List[Any] = DetrForSegmentation(lowerCAmelCase__ ) if is_panoptic else DetrForObjectDetection(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# verify our conversion on an image
_lowerCAmelCase : Union[str, Any] = "coco_panoptic" if is_panoptic else "coco_detection"
_lowerCAmelCase : List[Any] = DetrImageProcessor(format=lowerCAmelCase__ )
_lowerCAmelCase : List[str] = processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase : int = encoding["pixel_values"]
_lowerCAmelCase : List[str] = detr(lowerCAmelCase__ )
_lowerCAmelCase : Any = model(lowerCAmelCase__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
snake_case = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 587 | def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 587 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCamelCase__ ( A : str = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
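    # Scrape the Worldometers page: collect the headline elements and the counter/number elements, then pair them up positionally into a dict.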
UpperCAmelCase = BeautifulSoup(requests.get(__A ).text , '''html.parser''' )
UpperCAmelCase = soup.findAll('''h1''' )
UpperCAmelCase = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(__A , __A )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 210 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = "ssube/stable-diffusion-x4-upscaler-onnx"
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__=0 ):
'''simple docstring'''
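        # Deterministic dummy inputs: a fixed-seed generator and a random 1x3x128x128 image, so output slices can be compared against expected values.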
snake_case: Dict = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) )
snake_case: Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case: Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Tuple = self.get_dummy_inputs()
snake_case: Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images
snake_case: int = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
snake_case: Optional[Any] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
snake_case: Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[Any] = self.get_dummy_inputs()
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
snake_case: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case: int = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
snake_case: Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: List[str] = self.get_dummy_inputs()
snake_case: Tuple = pipe(**SCREAMING_SNAKE_CASE__ ).images
snake_case: Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case: Dict = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
snake_case: Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: List[Any] = self.get_dummy_inputs()
snake_case: int = pipe(**SCREAMING_SNAKE_CASE__ ).images
snake_case: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case: Any = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
snake_case: Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = self.get_dummy_inputs()
snake_case: Any = pipe(**SCREAMING_SNAKE_CASE__ ).images
snake_case: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
snake_case: int = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Union[str, Any] = ort.SessionOptions()
snake_case: int = False
return options
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
snake_case: Dict = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
snake_case: Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'A fantasy landscape, trending on artstation'
snake_case: List[Any] = torch.manual_seed(0 )
snake_case: str = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
snake_case: Tuple = output.images
snake_case: Any = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case: Any = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
snake_case: str = init_image.resize((1_28, 1_28) )
snake_case: Tuple = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
snake_case: List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
snake_case: Optional[int] = 'A fantasy landscape, trending on artstation'
snake_case: Any = torch.manual_seed(0 )
snake_case: Optional[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
snake_case: Any = output.images
snake_case: int = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case: Dict = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 329 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
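# Lazy import structure: the torch-dependent modeling classes are only imported when they are actually accessed.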
snake_case__ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Dict = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 |
import numpy as np
snake_case__ : Tuple = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
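# 5x5 Polybius square: "i" and "j" share a cell, so "j" is mapped to "i" before encoding.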
class _A :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase , __lowercase = np.where(letter == self.SQUARE )
__lowercase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : int , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
__lowercase = message.replace(" " , "" )
__lowercase = message.replace("j" , "i" )
__lowercase = np.empty((2, len(lowerCamelCase )) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape(2 * len(lowerCamelCase ) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[numbers_index * 2] )
__lowercase = int(second_step[(numbers_index * 2) + 1] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = encoded_message + letter
return encoded_message
def _snake_case ( self : Optional[Any] , lowerCamelCase : str ):
'''simple docstring'''
__lowercase = message.lower()
        __lowercase = message.replace(" " , "" )
__lowercase = np.empty(2 * len(lowerCamelCase ) )
for letter_index in range(len(lowerCamelCase ) ):
__lowercase = self.letter_to_numbers(message[letter_index] )
__lowercase = numbers[0]
__lowercase = numbers[1]
__lowercase = first_step.reshape((2, len(lowerCamelCase )) )
__lowercase = ""
for numbers_index in range(len(lowerCamelCase ) ):
__lowercase = int(second_step[0, numbers_index] )
__lowercase = int(second_step[1, numbers_index] )
__lowercase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase )
__lowercase = decoded_message + letter
return decoded_message
| 655 | 1 |
from copy import deepcopy
class __a :
"""simple docstring"""
def __init__( self : Any ,_UpperCamelCase : list[int] | None = None ,_UpperCamelCase : int | None = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
SCREAMING_SNAKE_CASE__ =size
SCREAMING_SNAKE_CASE__ =[0] * size
elif arr is not None:
self.init(_UpperCamelCase )
else:
raise ValueError("""Either arr or size must be specified""" )
def __A ( self : List[Any] ,_UpperCamelCase : list[int] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =len(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =deepcopy(_UpperCamelCase )
for i in range(1 ,self.size ):
SCREAMING_SNAKE_CASE__ =self.next_(_UpperCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __A ( self : List[str] ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.tree[:]
for i in range(self.size - 1 ,0 ,-1 ):
SCREAMING_SNAKE_CASE__ =self.next_(_UpperCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __A ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
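        # index & (-index) isolates the lowest set bit; adding it gives the next node in the Fenwick tree.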
return index + (index & (-index))
@staticmethod
def __A ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return index - (index & (-index))
def __A ( self : int ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
SCREAMING_SNAKE_CASE__ =self.next_(_UpperCamelCase )
def __A ( self : str ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> None:
'''simple docstring'''
self.add(_UpperCamelCase ,value - self.get(_UpperCamelCase ) )
def __A ( self : Optional[Any] ,_UpperCamelCase : int ) -> int:
'''simple docstring'''
if right == 0:
return 0
SCREAMING_SNAKE_CASE__ =self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
SCREAMING_SNAKE_CASE__ =self.prev(_UpperCamelCase )
return result
def __A ( self : Union[str, Any] ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> int:
'''simple docstring'''
return self.prefix(_UpperCamelCase ) - self.prefix(_UpperCamelCase )
def __A ( self : List[str] ,_UpperCamelCase : int ) -> int:
'''simple docstring'''
return self.query(_UpperCamelCase ,index + 1 )
def __A ( self : List[str] ,_UpperCamelCase : int ) -> int:
'''simple docstring'''
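        # Descend the implicit tree to find the largest index whose prefix sum does not exceed value (-1 if even tree[0] is already too large).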
value -= self.tree[0]
if value < 0:
return -1
SCREAMING_SNAKE_CASE__ =1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
SCREAMING_SNAKE_CASE__ =0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
lowerCamelCase_ = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ ={
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
SCREAMING_SNAKE_CASE__ =int(re.match(R""".*layer_(\d*).*""", __UpperCamelCase )[1] )
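    # Transformer block files in the original checkpoint start at layer_3 (the first shards presumably hold the embeddings and their layernorm), so shift to a 0-based block index.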
layer_number -= 3
return f"""h.{layer_number}.""" + key
def UpperCAmelCase_ ( __UpperCamelCase ):
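    # Bytes per element for a torch dtype; torch.bool is counted as 1/8 byte here.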
if dtype == torch.bool:
return 1 / 8
SCREAMING_SNAKE_CASE__ =re.search(R"""[^\d](\d+)$""", str(__UpperCamelCase ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
SCREAMING_SNAKE_CASE__ =int(bit_search.groups()[0] )
return bit_size // 8
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase ):
# Construct model
if bloom_config_file == "":
SCREAMING_SNAKE_CASE__ =BloomConfig()
else:
SCREAMING_SNAKE_CASE__ =BloomConfig.from_json_file(__UpperCamelCase )
if shard_model:
SCREAMING_SNAKE_CASE__ =os.listdir(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =sorted(filter(lambda __UpperCamelCase : s.startswith("""layer""" ) and "model_00" in s, __UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ ={"""weight_map""": {}, """metadata""": {}}
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =BloomConfig()
for j, file in enumerate(__UpperCamelCase ):
print("""Processing file: {}""".format(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ =None
for i in range(__UpperCamelCase ):
# load all TP files
SCREAMING_SNAKE_CASE__ =file.replace("""model_00""", f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ =torch.load(os.path.join(__UpperCamelCase, __UpperCamelCase ), map_location="""cpu""" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ =list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ =temp.pop(__UpperCamelCase )
if tensors is None:
SCREAMING_SNAKE_CASE__ =temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ =torch.cat([tensors[key], temp[key]], dim=__UpperCamelCase )
            # Divide the summed weights by the TP degree to finish the averaging
for key in tensors.keys():
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ =tensors[key] / pretraining_tp
torch.save(
__UpperCamelCase, os.path.join(
__UpperCamelCase, """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ), str(len(__UpperCamelCase ) ).zfill(5 ) ), ), )
for key in tensors.keys():
SCREAMING_SNAKE_CASE__ =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE__ ="""pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ), str(len(__UpperCamelCase ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE__ =BloomConfig()
SCREAMING_SNAKE_CASE__ =pytorch_dump_folder_path + """/""" + CONFIG_NAME
SCREAMING_SNAKE_CASE__ =total_size
with open(__UpperCamelCase, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCamelCase, WEIGHTS_NAME + """.index.json""" ), """w""", encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ =json.dumps(__UpperCamelCase, indent=2, sort_keys=__UpperCamelCase ) + """\n"""
f.write(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ =BloomModel(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =os.listdir(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =sorted(filter(lambda __UpperCamelCase : s.startswith("""layer""" ) and "model_00" in s, __UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ =None
for i, file in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =None
for i in range(__UpperCamelCase ):
# load all TP files
SCREAMING_SNAKE_CASE__ =file.replace("""model_00""", f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ =torch.load(os.path.join(__UpperCamelCase, __UpperCamelCase ), map_location="""cpu""" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ =list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ =temp.pop(__UpperCamelCase )
if tensors is None:
SCREAMING_SNAKE_CASE__ =temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ =torch.cat([tensors[key], temp[key]], dim=__UpperCamelCase )
            # Divide the summed weights by the TP degree to finish the averaging
for key in tensors.keys():
if any(key.endswith(__UpperCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ =tensors[key] / pretraining_tp
SCREAMING_SNAKE_CASE__ =model.load_state_dict(__UpperCamelCase, strict=__UpperCamelCase )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
SCREAMING_SNAKE_CASE__ =set(other_keys.missing_keys )
else:
SCREAMING_SNAKE_CASE__ =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(__UpperCamelCase, exist_ok=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ =pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
SCREAMING_SNAKE_CASE__ =model.to(config.torch_dtype )
torch.save(model.state_dict(), __UpperCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__UpperCamelCase, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
lowerCamelCase_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 151 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Dict ):
__lowerCAmelCase = R"\w+[.]\d+"
__lowerCAmelCase = re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
__lowerCAmelCase = key.replace(SCREAMING_SNAKE_CASE_ , "_".join(pat.split("." ) ) )
return key
def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
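    # Rename a PyTorch parameter to Flax conventions and reshape where needed: norm weights become "scale", embedding weights become "embedding", conv kernels go from OIHW to HWIO, and linear weights are transposed.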
__lowerCAmelCase = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__lowerCAmelCase = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__lowerCAmelCase = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__lowerCAmelCase = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowerCAmelCase = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__lowerCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowerCAmelCase = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
__lowerCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowerCAmelCase = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowerCAmelCase = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any]=42 ):
# Step 1: Convert pytorch tensor to numpy
__lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__lowerCAmelCase = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
__lowerCAmelCase = flatten_dict(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
| 552 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def _a ( SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCAmelCase = VideoMAEConfig()
set_architecture_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "finetuned" not in model_name:
__lowerCAmelCase = False
if "finetuned" in model_name:
__lowerCAmelCase = "huggingface/label-files"
if "kinetics" in model_name:
__lowerCAmelCase = 4_00
__lowerCAmelCase = "kinetics400-id2label.json"
elif "ssv2" in model_name:
__lowerCAmelCase = 1_74
__lowerCAmelCase = "something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
__lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ):
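    # Width/depth presets for the small, large and huge variants; "base" keeps the VideoMAEConfig defaults.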
if "small" in model_name:
__lowerCAmelCase = 3_84
__lowerCAmelCase = 15_36
__lowerCAmelCase = 12
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 3
__lowerCAmelCase = 1_92
__lowerCAmelCase = 7_68
elif "large" in model_name:
__lowerCAmelCase = 10_24
__lowerCAmelCase = 40_96
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 5_12
__lowerCAmelCase = 20_48
elif "huge" in model_name:
__lowerCAmelCase = 12_80
__lowerCAmelCase = 51_20
__lowerCAmelCase = 32
__lowerCAmelCase = 16
__lowerCAmelCase = 12
__lowerCAmelCase = 8
__lowerCAmelCase = 6_40
__lowerCAmelCase = 25_60
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
if "encoder." in name:
__lowerCAmelCase = name.replace("encoder." , "" )
if "cls_token" in name:
__lowerCAmelCase = name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
__lowerCAmelCase = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
__lowerCAmelCase = name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
__lowerCAmelCase = name.replace("attn" , "attention.self" )
if "attn" in name:
__lowerCAmelCase = name.replace("attn" , "attention.attention" )
if "norm1" in name:
__lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
__lowerCAmelCase = name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
__lowerCAmelCase = name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
__lowerCAmelCase = name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCAmelCase = name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
__lowerCAmelCase = name.replace("head" , "classifier" )
return name
def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ):
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if key.startswith("encoder." ):
__lowerCAmelCase = key.replace("encoder." , "" )
if "qkv" in key:
__lowerCAmelCase = key.split("." )
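            # The q/k/v projections are stored as one fused matrix; split it into query, key and value chunks of size dim.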
if key.startswith("decoder.blocks" ):
__lowerCAmelCase = config.decoder_hidden_size
__lowerCAmelCase = int(key_split[2] )
__lowerCAmelCase = "decoder.decoder_layers."
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = config.hidden_size
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = "videomae.encoder.layer."
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[dim : dim * 2, :]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val
return orig_state_dict
def _a ( ):
__lowerCAmelCase = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__lowerCAmelCase = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
__lowerCAmelCase = get_videomae_config(SCREAMING_SNAKE_CASE_ )
if "finetuned" in model_name:
__lowerCAmelCase = VideoMAEForVideoClassification(SCREAMING_SNAKE_CASE_ )
else:
__lowerCAmelCase = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
# download original checkpoint, hosted on Google Drive
__lowerCAmelCase = "pytorch_model.bin"
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
if "model" in files:
__lowerCAmelCase = files["model"]
else:
__lowerCAmelCase = files["module"]
__lowerCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
# verify model on basic input
__lowerCAmelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
if "finetuned" not in model_name:
__lowerCAmelCase = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = outputs.logits
__lowerCAmelCase = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCAmelCase = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCAmelCase = torch.Size([1, 4_00] )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__lowerCAmelCase = torch.Size([1, 14_08, 15_36] )
__lowerCAmelCase = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCAmelCase = torch.Size([1, 1_74] )
__lowerCAmelCase = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCAmelCase = outputs.loss
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="nielsr" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase__ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 552 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger('transformers.models.speecht5')
UpperCamelCase__ = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
UpperCamelCase__ = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
UpperCamelCase__ = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
UpperCamelCase__ = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
UpperCamelCase__ = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
UpperCamelCase__ = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
UpperCamelCase__ = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
UpperCamelCase__ = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
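# Per-task mappings: S2T combines the speech encoder prenet with the text decoder pre/post-nets, T2S the text encoder prenet with the speech decoder pre/post-nets, and S2S uses the speech prenet/postnets on both sides.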
UpperCamelCase__ = []
UpperCamelCase__ = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
UpperCamelCase__ = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
UpperCamelCase__ = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
UpperCamelCase__ = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for attribute in key.split("." ):
lowercase_ : Dict = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
lowercase_ : List[Any] = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
lowercase_ : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase_ : int = value
elif weight_type == "weight_g":
lowercase_ : Union[str, Any] = value
elif weight_type == "weight_v":
lowercase_ : Tuple = value
elif weight_type == "bias":
lowercase_ : Tuple = value
elif weight_type == "running_mean":
lowercase_ : List[str] = value
elif weight_type == "running_var":
lowercase_ : Any = value
elif weight_type == "num_batches_tracked":
lowercase_ : List[str] = value
else:
lowercase_ : Dict = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase_ , lowercase_ : Tuple = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
if task == "s2t":
lowercase_ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder
lowercase_ : Tuple = MAPPING_S2T
lowercase_ : Any = IGNORE_KEYS_S2T
elif task == "t2s":
lowercase_ : Tuple = None
lowercase_ : Dict = MAPPING_T2S
lowercase_ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
lowercase_ : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
lowercase_ : Optional[int] = MAPPING_S2S
lowercase_ : Tuple = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(_UpperCamelCase , _UpperCamelCase ):
logger.info(F"""{name} was ignored""" )
continue
lowercase_ : int = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
lowercase_ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowercase_ , lowercase_ : int = key.split(".*." )
if prefix in name and suffix in name:
lowercase_ : int = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowercase_ : int = True
if "*" in mapped_key:
lowercase_ : Any = name.split(_UpperCamelCase )[0].split("." )[-2]
lowercase_ : Optional[int] = mapped_key.replace("*" , _UpperCamelCase )
if "weight_g" in name:
lowercase_ : Optional[Any] = "weight_g"
elif "weight_v" in name:
lowercase_ : str = "weight_v"
elif "bias" in name:
lowercase_ : Dict = "bias"
elif "weight" in name:
lowercase_ : int = "weight"
elif "running_mean" in name:
lowercase_ : Dict = "running_mean"
elif "running_var" in name:
lowercase_ : Union[str, Any] = "running_var"
elif "num_batches_tracked" in name:
lowercase_ : Dict = "num_batches_tracked"
else:
lowercase_ : Tuple = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = full_name.split("conv_layers." )[-1]
lowercase_ : Dict = name.split("." )
lowercase_ : Optional[Any] = int(items[0] )
lowercase_ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase_ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase_ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase_ : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase_ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
"""simple docstring"""
if config_path is not None:
lowercase_ : Union[str, Any] = SpeechTaConfig.from_pretrained(_UpperCamelCase )
else:
lowercase_ : List[str] = SpeechTaConfig()
if task == "s2t":
lowercase_ : Any = config.max_text_positions
lowercase_ : List[str] = SpeechTaForSpeechToText(_UpperCamelCase )
elif task == "t2s":
lowercase_ : Optional[Any] = 1876
lowercase_ : List[str] = 600
lowercase_ : Union[str, Any] = config.max_speech_positions
lowercase_ : str = SpeechTaForTextToSpeech(_UpperCamelCase )
elif task == "s2s":
lowercase_ : List[Any] = 1876
lowercase_ : List[str] = config.max_speech_positions
lowercase_ : List[Any] = SpeechTaForSpeechToSpeech(_UpperCamelCase )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
lowercase_ : Union[str, Any] = SpeechTaTokenizer(_UpperCamelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
lowercase_ : Optional[Any] = AddedToken("<mask>" , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
lowercase_ : List[Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
lowercase_ : str = SpeechTaFeatureExtractor()
lowercase_ : Dict = SpeechTaProcessor(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
lowercase_ : str = torch.load(_UpperCamelCase )
recursively_load_weights(fairseq_checkpoint["model"] , _UpperCamelCase , _UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(_UpperCamelCase )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCamelCase__ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
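
# Illustrative usage (sketch, not part of the original script; the file name and all paths
# below are placeholders). Converting a fairseq text-to-speech checkpoint could look like:
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path /path/to/fairseq_tts.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_converted
#
# after which the dumped folder can be reloaded with, e.g.,
# SpeechTaForTextToSpeech.from_pretrained("./speecht5_tts_converted").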
| 620 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _UpperCAmelCase :
def __init__( self : int , a : Dict , a : Optional[Any]=1_3 , a : str=3_0 , a : Dict=2 , a : List[Any]=3 , a : Optional[int]=True , a : Tuple=True , a : List[Any]=3_2 , a : int=2 , a : List[str]=4 , a : Dict=3_7 , a : Optional[Any]="gelu" , a : List[str]=0.1 , a : Dict=0.1 , a : List[Any]=1_0 , a : Union[str, Any]=0.02 , a : Union[str, Any]=3 , a : int=None , a : Any=2 , ):
'''simple docstring'''
lowercase_ : Any = parent
lowercase_ : Union[str, Any] = batch_size
lowercase_ : Any = image_size
lowercase_ : str = patch_size
lowercase_ : int = num_channels
lowercase_ : str = is_training
lowercase_ : str = use_labels
lowercase_ : List[Any] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : List[str] = type_sequence_label_size
lowercase_ : List[Any] = initializer_range
lowercase_ : str = scope
lowercase_ : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase_ : Any = (image_size // patch_size) ** 2
lowercase_ : str = num_patches + 2
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase__ ( self : Any , a : List[str] , a : str , a : Tuple ):
'''simple docstring'''
lowercase_ : List[str] = TFDeiTModel(config=a )
lowercase_ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Dict , a : Optional[int] , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[str] = TFDeiTForMaskedImageModeling(config=a )
lowercase_ : Union[str, Any] = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ : Dict = 1
lowercase_ : Tuple = TFDeiTForMaskedImageModeling(a )
lowercase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Dict = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : int , a : List[str] , a : List[str] , a : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.type_sequence_label_size
lowercase_ : Union[str, Any] = TFDeiTForImageClassification(a )
lowercase_ : int = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Dict = 1
lowercase_ : str = TFDeiTForImageClassification(a )
lowercase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Any = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = config_and_inputs
lowercase_ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
__lowerCamelCase: Tuple = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__lowerCamelCase: Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__lowerCamelCase: Union[str, Any] = False
__lowerCamelCase: Any = False
__lowerCamelCase: List[str] = False
__lowerCamelCase: Optional[Any] = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = TFDeiTModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , tf.keras.layers.Dense ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = model_class(a )
lowercase_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def lowerCAmelCase__ ( self : Any , a : int , a : Union[str, Any] , a : Any=False ):
'''simple docstring'''
lowercase_ : Union[str, Any] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = TFDeiTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
lowercase_ : Union[str, Any] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : Optional[Any] = image_processor(images=a , return_tensors="tf" )
# forward pass
lowercase_ : int = model(**a )
# verify the logits
lowercase_ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase_ : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
| 620 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
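
# Illustrative note (not part of the original module), assuming this file is the package
# __init__ for transformers.models.mobilenet_v2: with the lazy-module pattern above, importing
# the package stays cheap, and the torch-backed submodule is only imported the first time one
# of its attributes is accessed, e.g.
#
#   from transformers.models.mobilenet_v2 import MobileNetV2Config  # no torch import triggered
#   from transformers.models.mobilenet_v2 import MobileNetV2Model   # pulls in the torch model code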
 | 636 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random()
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ):
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = min_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length
SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__ : int = feature_size
SCREAMING_SNAKE_CASE__ : str = padding_value
SCREAMING_SNAKE_CASE__ : Any = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = num_mel_bins
SCREAMING_SNAKE_CASE__ : int = hop_length
SCREAMING_SNAKE_CASE__ : str = win_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function
SCREAMING_SNAKE_CASE__ : List[str] = fmin
SCREAMING_SNAKE_CASE__ : Dict = fmax
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]:
"""simple docstring"""
def _flatten(a_ : int ):
return list(itertools.chain(*a_ ) )
if equal_length:
SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]:
"""simple docstring"""
if equal_length:
SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = SpeechTaFeatureExtractor
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SpeechTaFeatureExtractionTester(self )
def __lowercase( self : Any , a_ : Optional[int] )-> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[1][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __lowercase( self : Optional[int] , a_ : List[str] )-> Any:
"""simple docstring"""
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
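
# Illustrative usage (sketch, not part of the tests above): outside the test suite, the feature
# extractor is applied to raw 16 kHz waveforms; the shape below assumes the default configuration.
#
#   feature_extractor = SpeechTaFeatureExtractor()
#   waveform = [0.0] * 16000                      # one second of (silent) audio
#   inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#   inputs.input_values.shape                     # (1, 16000)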
| 636 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
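
# Illustrative example (not part of the original script): for a 3-element list, Heap's algorithm
# yields all 3! = 6 orderings.
#
#   >>> heaps([1, 2, 3])
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]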
| 64 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase : List[str] = Lock()
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
A = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
A = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
A = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
A = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def __snake_case ( UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
A = []
A = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
A = temp_rs
A = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
A = Pipe()
A = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
A = temp_rs
A = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
A = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __snake_case ( ) -> Optional[Any]:
"""simple docstring"""
A = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*UpperCamelCase__ )
A = odd_even_transposition(UpperCamelCase__ )
print('Sorted List\n' )
print(*UpperCamelCase__ )
if __name__ == "__main__":
main()
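
# Illustrative note (not part of the original script): main() sorts the reversed list 10..1 with
# one process per element, so the expected output is roughly:
#
#   Initial List
#   10 9 8 7 6 5 4 3 2 1
#   Sorted List
#
#   1 2 3 4 5 6 7 8 9 10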
| 690 | 0 |
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return a list of GIF URLs from the Giphy search API for the given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 7 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: List[Any] = logging.get_logger(__name__)
def _UpperCAmelCase ( a : Any , a : Dict=False , a : Union[str, Any]=False , a : Tuple=False ) -> List[str]:
"""simple docstring"""
lowercase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _UpperCAmelCase ( a : Dict , a : Tuple ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
lowercase_ : Optional[int] = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : str = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
lowercase_ : int = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : List[str] = in_proj_bias[: config.hidden_size]
lowercase_ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase_ : Union[str, Any] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(a , a )
def _UpperCAmelCase ( a : Optional[Any] , a : Tuple , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase_ : List[Any] = dct.pop(a )
lowercase_ : Dict = val
@torch.no_grad()
def _UpperCAmelCase ( a : List[Any] , a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : str = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=a )
lowercase_ : int = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : str = False
if "vqa" in checkpoint_url:
lowercase_ : str = True
lowercase_ : Optional[int] = 3_1_2_9
lowercase_ : Any = 'huggingface/label-files'
lowercase_ : Optional[Any] = 'vqa2-id2label.json'
lowercase_ : int = json.load(open(hf_hub_download(a , a , repo_type='dataset' ) , 'r' ) )
lowercase_ : Optional[int] = {int(a ): v for k, v in idalabel.items()}
lowercase_ : List[Any] = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
lowercase_ : List[Any] = ViltForQuestionAnswering(a )
elif "nlvr" in checkpoint_url:
lowercase_ : Dict = True
lowercase_ : List[str] = 2
lowercase_ : Tuple = {0: 'False', 1: 'True'}
lowercase_ : Optional[int] = {v: k for k, v in config.idalabel.items()}
lowercase_ : int = 3
lowercase_ : Any = ViltForImagesAndTextClassification(a )
elif "irtr" in checkpoint_url:
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = ViltForImageAndTextRetrieval(a )
elif "mlm_itm" in checkpoint_url:
lowercase_ : int = True
lowercase_ : Tuple = ViltForMaskedLM(a )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(a , map_location='cpu' )['state_dict']
lowercase_ : Union[str, Any] = create_rename_keys(a , a , a , a )
for src, dest in rename_keys:
rename_key(a , a , a )
read_in_q_k_v(a , a )
if mlm_model or irtr_model:
lowercase_ : str = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(a , a )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowercase_ , lowercase_ : Dict = model.load_state_dict(a , strict=a )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(a )
# Define processor
lowercase_ : Optional[int] = ViltImageProcessor(size=3_8_4 )
lowercase_ : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' )
lowercase_ : Any = ViltProcessor(a , a )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowercase_ : Union[str, Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Optional[Any] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=a ).raw )
lowercase_ : Any = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
lowercase_ : Union[str, Any] = processor(a , a , return_tensors='pt' )
lowercase_ : List[str] = processor(a , a , return_tensors='pt' )
lowercase_ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowercase_ : List[str] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=a ).raw )
if mlm_model:
lowercase_ : Dict = 'a bunch of [MASK] laying on a [MASK].'
else:
lowercase_ : List[Any] = 'How many cats are there?'
lowercase_ : List[Any] = processor(a , a , return_tensors='pt' )
lowercase_ : Optional[int] = model(**a )
# Verify outputs
if mlm_model:
lowercase_ : Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
lowercase_ : Optional[Any] = torch.tensor([-12.50_61, -12.51_23, -12.51_74] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
# verify masked token prediction equals "cats"
lowercase_ : int = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowercase_ : Optional[Any] = torch.Size([1, 3_1_2_9] )
lowercase_ : Tuple = torch.tensor([-15.94_95, -18.14_72, -10.30_41] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
# verify vqa prediction equals "2"
lowercase_ : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowercase_ : Optional[Any] = torch.Size([1, 2] )
lowercase_ : Optional[Any] = torch.tensor([-2.87_21, 2.12_91] )
assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(a ).mkdir(exist_ok=a )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(a )
processor.save_pretrained(a )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A: Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
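
# Illustrative usage (sketch, not part of the original script; the file name and output path are
# placeholders). Converting the default MLM+ITM checkpoint could look like:
#
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm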
| 7 | 1 |