| column | dtype | range |
|---|---|---|
| code | string | lengths 82 to 53.2k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |

Each row below is rendered as its `code` cell, a `[code_codestyle: …]` marker, its `style_context` cell, and a closing `[style_context_codestyle: … | label: …]` marker.
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def A ( _A ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCamelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A ( ):
"""simple docstring"""
snake_case_ :List[Any] = 2
while True:
if is_prime(UpperCamelCase__ ):
yield num
num += 1
def A ( _A = 2_000_000 ):
"""simple docstring"""
return sum(takewhile(lambda _A : x < n, prime_generator() ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
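The 6k +/- 1 step in `is_prime` relies on every integer being expressible as 6k + r with r in 0..5; any r other than 1 or 5 makes the number divisible by 2 or 3, so only candidate divisors of the form 6k +/- 1 need testing. A quick self-contained cross-check against naive trial division:

# Cross-check is_prime against naive trial division on a small range.
def naive_is_prime(n: int) -> bool:
    return n >= 2 and all(n % d != 0 for d in range(2, n))

assert all(is_prime(n) == naive_is_prime(n) for n in range(-5, 500))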
[code_codestyle: 584]
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
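As a minimal sketch of how the parser above is driven programmatically (the config path is illustrative, and calling `test_command` really launches the distributed test script):

# Illustrative only: parse a fake CLI invocation and run the test command.
parser = test_command_parser()
args = parser.parse_args(["--config_file", "/tmp/default_config.yaml"])
test_command(args)  # runs test_script.py through `accelerate-launch`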
[style_context_codestyle: 641 | label: 0]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 366 | """simple docstring"""
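The `_import_structure` dict plus `_LazyModule` defers importing the heavy framework-specific modules until an attribute is first touched. A minimal self-contained sketch of the same idea, independent of the transformers internals assumed above:

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch of deferred imports: resolve submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        # only called when `symbol` is not found the normal way
        submodule_name = self._symbol_to_module[symbol]
        module = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ is not hit again
        return value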
[code_codestyle: 366]

"""Find the 10001st prime number (Project Euler style)."""
import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")

[style_context_codestyle: 366 | label: 1]
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
def __init__( self : Optional[int] , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_5_5 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Any , ):
super().__init__(**A )
_UpperCAmelCase : str = size if size is not None else {"shortest_edge": 2_5_6}
_UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
_UpperCAmelCase : Optional[Any] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : Tuple = size
_UpperCAmelCase : Optional[int] = resample
_UpperCAmelCase : Tuple = do_center_crop
_UpperCAmelCase : str = crop_size
_UpperCAmelCase : Optional[Any] = do_rescale
_UpperCAmelCase : Optional[int] = rescale_factor
_UpperCAmelCase : List[str] = do_normalize
_UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self : Union[str, Any] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[Any] , ):
_UpperCAmelCase : str = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase : Any = get_resize_output_image_size(A , size=size["shortest_edge"] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def snake_case_ ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Tuple , ):
_UpperCAmelCase : Optional[int] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A , size=(size["height"], size["width"]) , data_format=A , **A )
def snake_case_ ( self : List[Any] , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : int ):
return rescale(A , scale=A , data_format=A , **A )
def snake_case_ ( self : Optional[int] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : List[Any] , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def snake_case_ ( self : int , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ):
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : int = size if size is not None else self.size
_UpperCAmelCase : Tuple = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : Optional[Any] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Dict = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : int = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
_UpperCAmelCase : List[Any] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
_UpperCAmelCase : Dict = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
_UpperCAmelCase : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
_UpperCAmelCase : Tuple = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : Union[str, Any] = {"pixel_values": images}
return BatchFeature(data=A , tensor_type=A )
def snake_case_ ( self : List[str] , A : List[str] , A : List[Tuple] = None ):
_UpperCAmelCase : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A ) != len(A ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(A ):
_UpperCAmelCase : Any = target_sizes.numpy()
_UpperCAmelCase : List[Any] = []
for idx in range(len(A ) ):
_UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A )
_UpperCAmelCase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A )
else:
_UpperCAmelCase : int = logits.argmax(dim=1 )
_UpperCAmelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
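A minimal usage sketch for the processor above, assuming the transformers internals it imports are available; the dummy image and the `ImageProcessor` name are the placeholders chosen above:

import numpy as np

# Dummy HWC uint8 image; shapes below assume the defaults set in __init__.
image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)

processor = ImageProcessor()
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224): resize to 256 shortest edge, then 224x224 crop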
[code_codestyle: 289]
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f'{test_file} instead.' )
_UpperCAmelCase : int = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
_UpperCAmelCase : Any = components[:-1] + [test_fn.replace(".py" , "" )]
_UpperCAmelCase : List[str] = ".".join(SCREAMING_SNAKE_CASE__ )
return test_module_path
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_module_path(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = importlib.import_module(SCREAMING_SNAKE_CASE__ )
return test_module
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Tuple = get_test_module(SCREAMING_SNAKE_CASE__ )
for attr in dir(SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCAmelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , "all_model_classes" , [] )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Tuple = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = test_class()
if hasattr(SCREAMING_SNAKE_CASE__ , "setUp" ):
test.setUp()
_UpperCAmelCase : int = None
if hasattr(SCREAMING_SNAKE_CASE__ , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCAmelCase : List[str] = test.model_tester.__class__
return model_tester
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = []
for test_class in test_classes:
_UpperCAmelCase : Optional[Any] = get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ )
if tester_class is not None:
tester_classes.append(SCREAMING_SNAKE_CASE__ )
# sort with class names
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x.__name__ )
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_test_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = {test_class: get_model_tester_from_test_class(SCREAMING_SNAKE_CASE__ ) for test_class in test_classes}
return test_tester_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_model_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = {
model_class: get_test_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_test_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = get_model_classes(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {
model_class: get_tester_classes_for_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for model_class in model_classes
}
return model_to_tester_mapping
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE__ ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return {to_json(SCREAMING_SNAKE_CASE__ ): to_json(SCREAMING_SNAKE_CASE__ ) for k, v in o.items()}
else:
return o
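A quick sketch of how these helpers are meant to be driven together; the test file path is illustrative and assumes the process runs from the root of a transformers checkout:

# Illustrative only: maps each model class in the file to its tester classes.
test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
mapping = get_model_to_tester_mapping(test_file)
print(to_json(mapping))  # e.g. {"BertModel": ["BertModelTester"], ...}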
[style_context_codestyle: 289 | label: 1]
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    # NOTE: the special-character mapping was not recoverable from the source snippet;
    # an empty dict keeps the methods below runnable (NFKC normalization then applies to every char).
    SP_CHAR_MAPPING: Dict[str, str] = {}

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, splitting pieces further on CJK characters, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
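A minimal usage sketch, assuming a sentencepiece model file is available on disk; the path is illustrative and nothing here downloads the real ernie-m checkpoint:

# Illustrative: requires an actual sentencepiece model file at this path.
tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model")
encoded = tokenizer("Hello world!")
print(encoded["input_ids"])  # [CLS] ... [SEP], per build_inputs_with_special_tokens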
[code_codestyle: 145]
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
[style_context_codestyle: 145 | label: 1]
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """Return the smallest cuboid size for which the number of integer-shortest-path cuboids exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
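For intuition: the inner check treats a cuboid whose longest side is `max_cuboid_size` and whose two shorter sides sum to `sum_shortest_sides`; the shortest path across its surface has length `sqrt(sum_shortest_sides**2 + max_cuboid_size**2)`. The classic 6 x 5 x 3 case is a quick sanity check:

from math import sqrt

# A 6 x 5 x 3 cuboid has shortest surface path sqrt((5 + 3)**2 + 6**2) = 10.
assert sqrt((5 + 3) ** 2 + 6**2) == 10.0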
[code_codestyle: 515]
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
[style_context_codestyle: 515 | label: 1]
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
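The same backwards-compatibility trick works for any renamed class; a generic, self-contained sketch of the pattern (all names here are illustrative, not transformers API):

import warnings


class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept so old imports keep working."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)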
[code_codestyle: 130]
import numpy as np


class Cell:
    """A cell in the grid world, holding its position, parent and A* costs g, h, f."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of `cell` (8-connected)."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`, using the squared Euclidean distance as heuristic."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # keep the better copy if an equal-or-cheaper node is already queued
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
[style_context_codestyle: 130 | label: 1]
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
[code_codestyle: 43]
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
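For intuition, `get_pairs` over the tuple-of-symbols representation used by the BPE loop below; this worked check runs as-is with the function above:

# "hello" as a symbol tuple yields its adjacent character pairs (the set dedupes repeats).
assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}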
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)

[style_context_codestyle: 604 | label: 0]
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Calculate the shear stress, tangential force, or cross-sectional area from the
    other two values; exactly one of the three arguments must be zero.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
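A quick worked check of the relation tau = F / A encoded above, with values chosen so the arithmetic is exact in floating point:

# A tangential force of 100 N over 0.5 m^2 gives a shear stress of 200 Pa.
assert shear_stress(stress=0, tangential_force=100, area=0.5) == ("stress", 200.0)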
[code_codestyle: 711]
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier with linear and RBF kernels."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel were used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF: Radial Basis Function kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize in the dual problem."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[i] * self.classes[i] * self.kernel(self.observations[i], observation)
            for i in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
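A minimal usage sketch on a linearly separable toy set, runnable with the class above (requires scipy); the predictions marked below are the expected outcomes, not guaranteed output:

# Two clusters split by the first coordinate.
xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
ys = np.asarray([1, 1, -1, -1])

svc = SVC(kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.asarray([0.0, 1.0])))  # expected: 1
print(svc.predict(np.asarray([1.0, 2.0])))  # expected: -1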
[style_context_codestyle: 392 | label: 0]
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class snake_case_ ( a_ ):
def snake_case_ ( self ):
a_ : List[Any] = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case_ ( self ):
with self.assertRaises(a_ ):
a_ : Tuple = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def snake_case_ ( self ):
with self.assertRaises(a_ ):
a_ : List[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def snake_case_ ( self ):
a_ : int = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a_ : List[Any] = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def snake_case_ ( self ):
a_ : List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case_ ( self ):
a_ : str = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def snake_case_ ( self ):
a_ : Union[str, Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def snake_case_ ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
a_ : List[str] = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def snake_case_ ( self ):
a_ : Dict = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def snake_case_ ( self ):
a_ : Any = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def snake_case_ ( self ):
import PIL.Image
a_ : List[Any] = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=a_ ) as mock_cast_to_python_objects:
a_ : Optional[int] = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
a_ , a_ : List[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , a_ )
self.assertFalse(kwargs["optimize_list_casting"] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
a_ : List[Any] = pa.BufferReader(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, pa.Buffer ) else pa.memory_map(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = pa.ipc.open_stream(SCREAMING_SNAKE_CASE__ )
a_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
a_ : List[str] = pa.BufferOutputStream()
a_ : Union[str, Any] = pa.schema(SCREAMING_SNAKE_CASE__ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE__, schema=SCREAMING_SNAKE_CASE__, writer_batch_size=SCREAMING_SNAKE_CASE__ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
a_ , a_ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : Optional[Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCAmelCase_ ( ) -> List[str]:
a_ : List[Any] = pa.BufferOutputStream()
a_ : Tuple = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=SCREAMING_SNAKE_CASE__, features=SCREAMING_SNAKE_CASE__ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
a_ , a_ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
a_ : Tuple = pa.BufferReader(output.getvalue() )
a_ : Optional[Any] = pa.ipc.open_stream(SCREAMING_SNAKE_CASE__ )
a_ : pa.Table = f.read_all()
a_ : Optional[int] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Dict:
a_ : Union[str, Any] = pa.BufferOutputStream()
a_ : Any = pa.schema(SCREAMING_SNAKE_CASE__ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE__, schema=SCREAMING_SNAKE_CASE__, writer_batch_size=SCREAMING_SNAKE_CASE__ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
a_ , a_ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : Union[str, Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> str:
a_ : List[Any] = pa.BufferOutputStream()
a_ : Optional[int] = pa.schema(SCREAMING_SNAKE_CASE__ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE__, schema=SCREAMING_SNAKE_CASE__, writer_batch_size=SCREAMING_SNAKE_CASE__ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
a_ , a_ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : int = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10] )
@pytest.mark.parametrize(
"fields", [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> str:
a_ : Dict = pa.BufferOutputStream()
a_ : List[Any] = pa.schema(SCREAMING_SNAKE_CASE__ ) if fields else None
with ArrowWriter(stream=SCREAMING_SNAKE_CASE__, schema=SCREAMING_SNAKE_CASE__, writer_batch_size=SCREAMING_SNAKE_CASE__ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
a_ , a_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a_ : Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(SCREAMING_SNAKE_CASE__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
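# Editor's sketch of the behaviour under test (assumes the public datasets API):
# without an optimized type, pyarrow infers int64 for Python ints; passing
# Value("int32") downcasts the (possibly nested) integer column, e.g.
#   pa.array(TypedSequence([[1, 2, 3]], optimized_int_type=Value("int32")))
# yields a list<int32> array.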
@pytest.mark.parametrize(
"col, expected_dtype", [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
], )
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True] )
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True] )
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())]) | 237 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7_600, mel_floor=1e-10, reduction_factor=2, return_attention_mask=True, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
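    # Editor's note: each sequence is normalized using only its first `length`
    # (non-padded) samples, and the padded tail is then overwritten with
    # `padding_value`, so padding never leaks into the statistics.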
    def _extract_mel_features(self, one_waveform, ):
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
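    # Editor's note: `spectrogram` returns a (num_mel_bins, num_frames) matrix, so
    # the transpose yields (num_frames, num_mel_bins), the per-frame log-mel layout
    # used for the extractor's spectrogram targets.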
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ):
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs, ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
# always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
# convert input values to correct format
a_ : Union[str, Any] = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a_ : str = [np.asarray(a_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(a_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a_ : Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(a_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a_ : int = input_values.astype(np.floataa )
# convert attention_mask to correct format
a_ : Union[str, Any] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a_ : Union[str, Any] = [np.asarray(a_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a_ : int = (
attention_mask
if self._get_padding_strategies(a_ , max_length=a_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a_ : Dict = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=a_ , padding_value=self.padding_value )
if return_tensors is not None:
a_ : Optional[Any] = padded_inputs.convert_to_tensors(a_ )
return padded_inputs
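    # Editor's usage sketch for this extractor (assumes 16 kHz mono float32 audio):
    #   extractor = SpeechT5FeatureExtractor()
    #   inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
    #   targets = extractor(audio_target=waveform, sampling_rate=16_000)  # log-mel labels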
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output | 237 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 713 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine", )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
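# Editor's usage sketch (hypothetical model name, assumes the transformers-cli entry point):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force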
| 218 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( A__ : Tuple , A__ : Any , A__ : List[Any] , A__ : Any , A__ : Optional[int] ):
# Load configuration defined in the metadata file
with open(A__ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(A__ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=A__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_entity_vocab(A__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=A__ , rstrip=A__ )
SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=A__ , rstrip=A__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(A__ )
with open(os.path.join(A__ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(A__ , A__ )
SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(A__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]]
SCREAMING_SNAKE_CASE = LukeModel(config=A__ ).eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(A__ , strict=A__ )
if not (len(A__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F"Missing keys {', '.join(A__ )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
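# Editor's usage sketch (hypothetical local paths):
#   python convert_luke_checkpoint.py --checkpoint_path ./luke.bin --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.tsv --pytorch_dump_folder_path ./luke-hf --model_size base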
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 16 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate | 105 | 0 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
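    # Editor's note: for 0/1 inputs the same truth table can be produced bitwise,
    # e.g. 1 - (input_1 ^ input_2), since XNOR is the negation of XOR.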
| 707 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 359 | 0 |
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    numerator = 0
    prev_denominator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
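# Editor's note: the recurrence above walks successive solutions of a Pell-type
# equation; each step grows the solution by a constant factor, so only a few
# dozen iterations are needed even for min_total = 10**12.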
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float list of lists with the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2_000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4_000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 283 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON configuration
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
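# Example invocation (illustrative only -- the paths below are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin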
| 634 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
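# Usage sketch (illustrative; "my-agent" is a made-up agent name). Passing None
# falls back to the default prompts repo, while any string containing
# whitespace is treated as a literal prompt and returned unchanged:
#
#   run_template = download_prompt(None, agent_name="my-agent", mode="run")
#   literal = download_prompt("Translate <<task>> to French", agent_name="my-agent")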
| 634 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
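# Migration sketch (illustrative; the checkpoint name below is an assumption):
# new code should construct the image processor directly instead of this
# deprecated alias, e.g.
#
#   from transformers import PoolFormerImageProcessor
#   image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")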
| 400 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculate the Gregorian Easter date for a given year using Gauss's method.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
__snake_case = """will be""" if year > datetime.now().year else """was"""
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 400 | 1 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 675 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\nclass FakeClass(metaclass=DummyObject):\n"
            "    _backends = 'torch'\n\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n"
            "\n\n"
            "CONSTANT = None\n"
            "\n\n"
            "def function(*args, **kwargs):\n"
            '    requires_backends(function, ["torch"])\n'
            "\n\n"
            "class FakeClass(metaclass=DummyObject):\n"
            '    _backends = ["torch"]\n\n'
            "    def __init__(self, *args, **kwargs):\n"
            '        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 675 | 1 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap over Node objects, with a name -> value lookup and an
    O(log n) decrease_key operation.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new value must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
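# Sanity check (added example): the keyed lookup provided by MinHeap.__getitem__
# reflects the decreased value.
assert my_min_heap["B"] == -17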
if __name__ == "__main__":
import doctest
doctest.testmod() | 721 | from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's algorithm: returns (x, y) such that a*x + b*y = gcd(a, b).
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Combines the congruences n ≡ r1 (mod n1) and n ≡ r2 (mod n2) for coprime n1, n2.
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Returns the multiplicative inverse of a modulo n.
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same result as chinese_remainder_theorem, implemented via modular inverses.
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
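    # Worked example (added): n ≡ 1 (mod 5) and n ≡ 3 (mod 7) has the unique
    # solution 31 modulo 35, and both implementations agree on it.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31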
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True) | 522 | 0 |
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
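    # Accuracy check (added example): the exact integral of x**2 over [0, 1] is
    # 1/3; with 1000 steps the trapezoidal estimate lands within ~2e-3 of it.
    assert abs(method_1([0.0, 1.0], 1000.0) - 1 / 3) < 2e-3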
| 330 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
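# Example invocation (illustrative; the checkpoint file name is a placeholder
# following the consistency-models naming scheme):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True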
| 330 | 1 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string, wrapped for `max_width` chars, so it can be replayed nicely.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub("--output_dir\s+[^\s]+", "", args.base_cmd)  # noqa
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub("--overwrite_output_dir\s+", "", args.base_cmd)  # noqa
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug everything but the subprocess run itself
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 385 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two 1-D signals and performs their circular convolution via the
    matrix method.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
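    # Illustrative check (added): for the default signals [2, 1, 2, -1] and
    # [1, 2, 3, 4] the circular convolution is [10.0, 10.0, 6.0, 14.0].
    assert CircularConvolution().circular_convolution() == [10.0, 10.0, 6.0, 14.0]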
| 385 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 482 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] | 329 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
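# Example invocation (illustrative; the paths are placeholders):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook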
| 43 | '''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
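# A minimal standalone sketch of the inspection API exercised above, guarded so that
# pytest collection does not trigger network access; the dataset names reuse fixtures
# already parametrized in this file.
if __name__ == "__main__":
    configs = get_dataset_config_names("paws")
    print(configs)  # expected to contain "labeled_final"
    print(get_dataset_split_names("paws", config_name="labeled_final"))  # ["train", "test", "validation"]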
| 43 | 1 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# Restored from the mangled `a : Dict = False`; this is the determinism flag diffusers
# training tests normally set at module level.
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # NOTE: the original clip_sample flag was lost in this copy; True is assumed below.
        # add_noise() does not depend on clip_sample, so the final comparison is unaffected.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
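# Why the closing assertions hold: both schedulers share the same beta schedule, so
# add_noise() computes the identical forward-process sample
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# A minimal standalone check of that equivalence (parameter values assumed, not from the test):
if __name__ == "__main__":
    ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    x0, eps = torch.randn(1, 3, 32, 32), torch.randn(1, 3, 32, 32)
    t = torch.tensor([500])
    assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t), atol=1e-6)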
| 273 |
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum the numbers on the diagonals of an n x n number spiral.

    Ring i (the square of side 2 * i + 1) contributes its four corners,
    4 * (2 * i + 1) ** 2 - 12 * i, written below as 4 * odd**2 - 6 * even.
    """
    total = 1  # the centre of the spiral
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
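# Quick sanity check (not part of the original script): the diagonals of a 5 x 5 spiral
# sum to 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.
assert solution(5) == 101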
| 273 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of [`FlaxLogitsProcessor`] and [`FlaxLogitsWarper`] objects, applied in order to a `scores` tensor."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs nucleus (top-p) filtering of the score distribution."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
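# Worked illustration of the masking above (values assumed, not from the library): with
# sorted probabilities [0.5, 0.3, 0.15, 0.05] and top_p=0.8, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0], so cumsum < 0.8 gives [True, False, False, False]; rolling by
# one and forcing position 0 yields [True, True, False, False], i.e. the 0.5 and 0.3
# tokens survive while the rest receive filter_value.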
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that restricts the distribution to the k highest-probability elements."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by zeroing the EOS probability below it."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] forcing specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the force map, do nothing; otherwise force only valid (non-negative) tokens.
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
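# Worked illustration of the array built in __init__ (assumed values): a map like
# {1: 50259, 2: 50359} becomes force_token_array = [-1, 50259, 50359], so at generation
# step cur_len the processor forces force_token_array[cur_len] whenever that entry is
# non-negative, and leaves the scores untouched otherwise.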
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] implementing Whisper's timestamp rules during generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
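# A minimal usage sketch (not part of the library source): chain a temperature warper
# with top-k filtering through FlaxLogitsProcessorList, exactly as `generate()` would.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)])
    dummy_input_ids = jnp.ones((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.array([[0.1, 0.4, 0.3, 0.2]])
    print(processors(dummy_input_ids, dummy_scores, cur_len=4))  # all but the two best logits -> -inf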
| 279 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
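# How the pattern above behaves (a simplified sketch, not transformers' actual
# implementation): the module replaces itself in sys.modules with a proxy that only
# imports a submodule the first time one of its exported names is touched.
#
#     import importlib, types
#
#     class _Lazy(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._map[attr], self.__name__)
#             return getattr(module, attr)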
| 502 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 502 | 1 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        # Restored from the mangled original; a zero timeout makes the remote-code
        # prompts in the dynamic-tokenizer tests deterministic.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow_tokenizer_class(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0) | 717 |
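# A compact sketch of the registration API exercised above, reusing this file's test
# fixtures; run it outside the test suite (the registry state is process-global):
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    assert TOKENIZER_MAPPING[CustomConfig] == (CustomTokenizer, None)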
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
__UpperCAmelCase : str = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
__lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
__UpperCAmelCase : Optional[Any] = torch.device(__lowercase )
self.assertEqual(model.batchnorm.running_mean.device , __lowercase )
__UpperCAmelCase : Any = torch.randn(2 , 3 )
__UpperCAmelCase : Dict = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__lowercase , execution_device=__lowercase , offload=__lowercase , weights_map=model.state_dict() , offload_buffers=__lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
__UpperCAmelCase : List[str] = torch.randn(2 , 3 )
__UpperCAmelCase : Optional[int] = model(__lowercase )
self.assertEqual(output.device , __lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__lowercase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
        self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 374 | 0 |
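The tests in this row exercise accelerate's CPU-offload hooks on a mangled ModelForTest. Below is a minimal de-obfuscated sketch of the same flow, assuming accelerate's public hooks API; TinyModel is a hypothetical stand-in for the test model, not part of the original.

# Minimal sketch of the offload flow tested above (TinyModel is hypothetical).
import torch
from torch import nn
from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


model = TinyModel()
device = 0 if torch.cuda.is_available() else "cpu"
# Parameters move to the meta device; weights stream in from the offload copy per forward.
attach_align_device_hook(model, execution_device=device, offload=True)
out = model(torch.randn(2, 3))
remove_hook_from_submodules(model)  # restores the real weights on CPU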
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def lowerCamelCase__ ( __snake_case, __snake_case=False ) -> Dict:
"""simple docstring"""
_UpperCamelCase = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False ) -> Optional[int]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCamelCase = ''''''
else:
_UpperCamelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCamelCase = in_proj_bias[: config.hidden_size]
_UpperCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase, _lowerCamelCase )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = dct.pop(_lowerCamelCase )
_UpperCamelCase = val
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase = Image.open(requests.get(_lowerCamelCase, stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = BitConfig(
global_padding='''same''', layer_type='''bottleneck''', depths=(3, 4, 9), out_features=['''stage3'''], embedding_dynamic_padding=_lowerCamelCase, )
_UpperCamelCase = ViTHybridConfig(backbone_config=_lowerCamelCase, image_size=3_84, num_labels=10_00 )
_UpperCamelCase = False
# load original model from timm
_UpperCamelCase = timm.create_model(_lowerCamelCase, pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCamelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
_UpperCamelCase = create_rename_keys(_lowerCamelCase, _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
_UpperCamelCase = '''huggingface/label-files'''
_UpperCamelCase = '''imagenet-1k-id2label.json'''
_UpperCamelCase = json.load(open(hf_hub_download(_lowerCamelCase, _lowerCamelCase, repo_type='''dataset''' ), '''r''' ) )
_UpperCamelCase = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase = idalabel
_UpperCamelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCamelCase = ViTHybridModel(_lowerCamelCase ).eval()
else:
_UpperCamelCase = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
_UpperCamelCase = create_transform(**resolve_data_config({}, model=_lowerCamelCase ) )
_UpperCamelCase = transform.transforms
_UpperCamelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase = ViTHybridImageProcessor(
do_resize=_lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=_lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=_lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_UpperCamelCase = prepare_img()
_UpperCamelCase = transform(_lowerCamelCase ).unsqueeze(0 )
_UpperCamelCase = processor(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase, _lowerCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase = model(_lowerCamelCase )
_UpperCamelCase = outputs.logits
print('''Predicted class:''', logits.argmax(-1 ).item() )
if base_model:
_UpperCamelCase = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase, outputs.pooler_output, atol=1e-3 )
else:
_UpperCamelCase = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase, outputs.logits, atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
_a = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 |
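The trickiest step in the conversion script above is splitting timm's fused qkv projection into separate query/key/value tensors (the read_in_q_k_v helper, mangled above). A self-contained sketch of that split with an illustrative hidden size:

# Sketch of the fused-qkv split (hidden size is illustrative).
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # timm packs q, k, v row-wise
in_proj_bias = torch.randn(3 * hidden_size)

query_w, key_w, value_w = in_proj_weight.split(hidden_size, dim=0)
query_b, key_b, value_b = in_proj_bias.split(hidden_size, dim=0)

assert torch.equal(query_w, in_proj_weight[:hidden_size, :])
assert torch.equal(value_b, in_proj_bias[-hidden_size:])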
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 549 | 0 |
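The _LazyModule wiring above defers heavy imports until an attribute is actually accessed. A generic, stdlib-only sketch of the same idea (this is not transformers' helper itself):

# Generic lazy-import sketch (standalone; not transformers' _LazyModule).
import importlib


class LazyAttr:
    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr, self._obj = module_name, attr, None

    def __call__(self, *args, **kwargs):
        if self._obj is None:  # import only on first use
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj(*args, **kwargs)


OrderedDict = LazyAttr("collections", "OrderedDict")
print(OrderedDict(a=1))  # triggers the import here, not at module load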
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class _snake_case ( lowercase__):
UpperCamelCase__ : Tuple =["""input_features""", """attention_mask"""]
def __init__( self : str, __lowercase : Optional[int]=80, __lowercase : Optional[int]=1_6000, __lowercase : Any=80, __lowercase : Optional[Any]=0.0, __lowercase : Optional[Any]=True, __lowercase : Optional[Any]=True, __lowercase : Any=True, **__lowercase : str, ):
super().__init__(feature_size=__lowercase, sampling_rate=__lowercase, padding_value=__lowercase, **__lowercase )
lowercase__ = num_mel_bins
lowercase__ = do_ceptral_normalize
lowercase__ = normalize_means
lowercase__ = normalize_vars
lowercase__ = True
def A__ ( self : Dict, __lowercase : np.ndarray, ):
lowercase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase__ = torch.from_numpy(__lowercase ).unsqueeze(0 )
lowercase__ = ta_kaldi.fbank(__lowercase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A__ ( __lowercase : np.ndarray, __lowercase : int, __lowercase : Optional[bool] = True, __lowercase : Optional[bool] = True, __lowercase : float = 0.0, ):
# make sure we normalize float32 arrays
if normalize_means:
lowercase__ = x[:input_length].mean(axis=0 )
lowercase__ = np.subtract(__lowercase, __lowercase )
if normalize_vars:
lowercase__ = x[:input_length].std(axis=0 )
lowercase__ = np.divide(__lowercase, __lowercase )
if input_length < x.shape[0]:
lowercase__ = padding_value
# make sure array is in float32
lowercase__ = x.astype(np.floataa )
return x
def A__ ( self : Union[str, Any], __lowercase : List[np.ndarray], __lowercase : Optional[np.ndarray] = None ):
lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowercase, __lowercase, self.normalize_means, self.normalize_vars, self.padding_value )
for x, n in zip(__lowercase, __lowercase )
]
def __call__( self : Union[str, Any], __lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], __lowercase : Union[bool, str, PaddingStrategy] = False, __lowercase : Optional[int] = None, __lowercase : bool = False, __lowercase : Optional[int] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, **__lowercase : Optional[Any], ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(__lowercase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(__lowercase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(__lowercase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase, np.ndarray ):
lowercase__ = np.asarray(__lowercase, dtype=np.floataa )
elif isinstance(__lowercase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [raw_speech]
# extract fbank features
lowercase__ = [self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
lowercase__ = BatchFeature({"input_features": features} )
lowercase__ = self.pad(
__lowercase, padding=__lowercase, max_length=__lowercase, truncation=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, **__lowercase, )
# make sure list is in array format
lowercase__ = padded_inputs.get("input_features" )
if isinstance(input_features[0], __lowercase ):
lowercase__ = [np.asarray(__lowercase, dtype=np.floataa ) for feature in input_features]
lowercase__ = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowercase__ = [np.asarray(__lowercase, dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase__ = (
np.array(__lowercase, dtype=np.intaa )
if self._get_padding_strategies(__lowercase, max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__ = self.normalize(
padded_inputs["input_features"], attention_mask=__lowercase )
if return_tensors is not None:
lowercase__ = padded_inputs.convert_to_tensors(__lowercase )
return padded_inputs
| 37 |
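The utterance_cmvn step above is plain per-utterance mean/variance normalization. Shown standalone on a dummy (frames, num_mel_bins) matrix; the epsilon is added here for numerical safety and is not in the original:

# Standalone utterance-level CMVN, mirroring utterance_cmvn above.
import numpy as np

feats = np.random.randn(100, 80).astype(np.float32)  # (frames, num_mel_bins)
normed = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-10)
print(normed.mean().round(6), normed.std().round(3))  # ~0.0 and ~1.0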
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __lowerCAmelCase ( ):
lowercase__ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ )
lowercase__ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ )
env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ )
# Let's go
lowercase__ = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 37 | 1 |
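Each *_command_parser above registers a subparser and sets a func default that main() dispatches to. The same pattern standalone:

# Minimal subcommand dispatch in the same style (standalone argparse sketch).
from argparse import ArgumentParser


def hello_parser(subparsers):
    p = subparsers.add_parser("hello")
    p.add_argument("--name", default="world")
    p.set_defaults(func=lambda a: print(f"hello {a.name}"))


parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_parser(subparsers)
args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints: hello accelerate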
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowercase__ :
'''simple docstring'''
A_ : Optional[Any] = None
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowercase_ )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowercase_ , """feat_extract.json""" )
feat_extract_first.to_json_file(lowercase_ )
_SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class.from_json_file(lowercase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Tuple = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
_SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class.from_pretrained(lowercase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class()
self.assertIsNotNone(lowercase_ )
| 533 |
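The save/load tests above reduce to a dict -> JSON -> dict round trip. A minimal sketch of the contract they assert:

# JSON round-trip sketch of the to_json_file/from_json_file contract tested above.
import json
import os
import tempfile

config = {"feature_size": 80, "sampling_rate": 16000}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "feat_extract.json")
    with open(path, "w") as f:
        json.dump(config, f)
    with open(path) as f:
        assert json.load(f) == config  # what assertEqual(to_dict, to_dict) verifies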
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[Any]:
super().__init__(
lowercase_ , question_encoder_tokenizer=lowercase_ , generator_tokenizer=lowercase_ , index=lowercase_ , init_retrieval=lowercase_ , )
__snake_case = None
def _a ( self , lowercase_) -> Union[str, Any]:
logger.info('initializing retrieval')
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized')
# needs to be set manually
__snake_case = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case = str(distributed_port + 1)
__snake_case = dist.new_group(ranks=lowercase_ , backend='gloo')
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main')
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def _a ( self) -> int:
return dist.get_rank(group=self.process_group) == 0
def _a ( self , lowercase_ , lowercase_ , lowercase_=torch.floataa) -> Dict:
__snake_case = torch.empty(lowercase_ , dtype=lowercase_)
dist.scatter(lowercase_ , src=0 , scatter_list=lowercase_ , group=self.process_group)
return target_tensor
def _a ( self) -> str:
__snake_case = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case = next((addr for addr in addrs if addr.startswith('e')) , lowercase_)
return ifname
def _a ( self , lowercase_ , lowercase_) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
__snake_case , __snake_case = self._main_retrieve(lowercase_ , lowercase_)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase_)
# distributed training
__snake_case = dist.get_world_size(group=self.process_group)
# gather logic
__snake_case = None
if self._is_main():
__snake_case = [torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(lowercase_)]
dist.gather(torch.tensor(lowercase_) , dst=0 , gather_list=lowercase_ , group=self.process_group)
# scatter logic
__snake_case = question_hidden_states.shape[0]
__snake_case = []
__snake_case = []
if self._is_main():
assert len(lowercase_) == world_size
__snake_case , __snake_case = self._main_retrieve(torch.cat(lowercase_).numpy() , lowercase_)
__snake_case , __snake_case = torch.tensor(lowercase_), torch.tensor(lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._chunk_tensor(lowercase_ , lowercase_)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs] , target_type=torch.intaa)
__snake_case = self._scattered(lowercase_ , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowercase_)
| 313 | 0 |
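retrieve() above has rank 0 gather every worker's queries, run the index once, and scatter the results back. A single-process gloo sketch of the two collectives involved (the address and port are placeholders):

# Single-process gloo group showing the gather/scatter calls used in retrieve() above.
import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29501")
dist.init_process_group("gloo", rank=0, world_size=1)

local = torch.randn(2, 4)
gathered = [torch.empty_like(local)]             # populated only on the dst rank
dist.gather(local, gather_list=gathered, dst=0)  # rank 0 collects every worker's queries
scattered = torch.empty_like(local)
dist.scatter(scattered, scatter_list=[gathered[0]], src=0)  # rank 0 ships results back
assert torch.equal(scattered, local)
dist.destroy_process_group()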
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence`` in place between indices ``start`` and ``end`` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        # swap so the maximum of the two halves sits at ``end``
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 717 |
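Slowsort is a deliberately inefficient teaching algorithm ("multiply and surrender"); it sorts in place and returns None:

# Usage: slowsort works in place.
data = [5, 2, 9, 1]
slowsort(data)
print(data)  # [1, 2, 5, 9]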
def merge_sort(collection: list) -> list:
    # Despite the name this is a min/max selection sort: repeatedly peel the
    # minimum onto ``start`` and the maximum onto ``end``, then reassemble.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 89 | 0 |
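Note the function consumes its argument (collection.remove mutates the input list), so pass a copy if the original order matters. A quick correctness check:

# merge_sort mutates its argument, so pass a throwaway list.
assert merge_sort([3, 1, 4, 1, 5]) == sorted([3, 1, 4, 1, 5])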
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Dict = LEDTokenizer
UpperCAmelCase__ : Union[str, Any] = LEDTokenizerFast
UpperCAmelCase__ : int = True
def snake_case_ ( self ) -> Tuple:
super().setUp()
UpperCamelCase : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase : str = dict(zip(SCREAMING_SNAKE_CASE_, range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase : List[str] = {'unk_token': '<unk>'}
UpperCamelCase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def snake_case_ ( self ) -> Any:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def snake_case_ ( self ) -> Dict:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCamelCase : Dict = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Dict = tokenizer(SCREAMING_SNAKE_CASE_, max_length=len(SCREAMING_SNAKE_CASE_ ), padding=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 9), batch.input_ids.shape )
self.assertEqual((2, 9), batch.attention_mask.shape )
UpperCamelCase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Dict = tokenizer(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
self.assertIn('input_ids', SCREAMING_SNAKE_CASE_ )
self.assertIn('attention_mask', SCREAMING_SNAKE_CASE_ )
self.assertNotIn('labels', SCREAMING_SNAKE_CASE_ )
self.assertNotIn('decoder_attention_mask', SCREAMING_SNAKE_CASE_ )
@require_torch
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : str = tokenizer(text_target=SCREAMING_SNAKE_CASE_, max_length=32, padding='max_length', return_tensors='pt' )
self.assertEqual(32, targets['input_ids'].shape[1] )
@require_torch
def snake_case_ ( self ) -> Any:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : int = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'], padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape, (2, 5122) )
@require_torch
def snake_case_ ( self ) -> Dict:
UpperCamelCase : List[str] = ['A long paragraph for summarization.']
UpperCamelCase : Tuple = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_, return_tensors='pt' )
UpperCamelCase : str = tokenizer(text_target=SCREAMING_SNAKE_CASE_, return_tensors='pt' )
UpperCamelCase : List[Any] = inputs['input_ids']
UpperCamelCase : str = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case_ ( self ) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase : Dict = ['Summary of the text.', 'Another summary.']
UpperCamelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = [[0] * len(SCREAMING_SNAKE_CASE_ ) for x in encoded_output['input_ids']]
UpperCamelCase : Dict = tokenizer.pad(SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(outputs['global_attention_mask'], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
pass
def snake_case_ ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = 'A, <mask> AllenNLP sentence.'
UpperCamelCase : int = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_ )
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
UpperCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 40 |
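The last test above pads a user-supplied global_attention_mask in lockstep with input_ids. A hedged usage sketch (this downloads allenai/led-base-16384; per the expected values in the test, LED pads the mask with -1):

# Sketch: padding a global_attention_mask with LEDTokenizerFast.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."], padding=False)
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc)  # shorter row's global_attention_mask gets -1 padding
print(padded["global_attention_mask"])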
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__UpperCAmelCase = {
'''facebook/xglm-564M''': 2_048,
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase : Any = 7
UpperCamelCase : Optional[int] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCamelCase : Dict = kwargs.get('additional_special_tokens', [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase : int = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCamelCase : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase : Optional[int] = len(self.sp_model )
UpperCamelCase : Any = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
UpperCamelCase : int = self.__dict__.copy()
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCamelCase : Any = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
UpperCamelCase : Any = {}
UpperCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase : Optional[int] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case_ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase : Union[str, Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Dict = ''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_, ' ' ).strip()
return out_string
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, 'wb' ) as fi:
UpperCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 40 | 1 |
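The subtle part of this tokenizer is the fairseq/spm id alignment described in the vocab comment above. A tiny self-contained sketch of the offset logic, with no sentencepiece model needed:

# Sketch of the fairseq-offset id mapping described in the vocab comment above.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm id 3 (",") must land on fairseq id 4


def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


print(token_to_id(",", {",": 3}))  # -> 4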
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
UpperCAmelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Tuple = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = """sgugger/tiny-distilbert-classification"""
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , only_pretrain_model=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : List[str] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , torchscript=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Dict = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# set architectures equal to `None`
UpperCAmelCase : str = None
UpperCAmelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Dict = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
UpperCAmelCase : str = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Union[str, Any] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = """sshleifer/tinier_bart"""
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : str = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Any = """sshleifer/tiny-gpt2"""
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = """sshleifer/tinier_bart"""
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = PyTorchBenchmark(_SCREAMING_SNAKE_CASE , configs=[config] )
UpperCAmelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , save_to_csv=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """train_time.csv""" ) , env_info_csv_file=os.path.join(_SCREAMING_SNAKE_CASE , """env.csv""" ) , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Union[str, Any] = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_SCREAMING_SNAKE_CASE ):
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """sequential""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """cumulative""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """current""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_SCREAMING_SNAKE_CASE , inference=_SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_SCREAMING_SNAKE_CASE , """log.txt""" ) , log_print=_SCREAMING_SNAKE_CASE , trace_memory_line_by_line=_SCREAMING_SNAKE_CASE , multi_process=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Any = PyTorchBenchmark(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_SCREAMING_SNAKE_CASE , """log.txt""" ) ).exists() )
| 711 |
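Outside the test harness, the benchmark utility is driven as below. Note it is deprecated in recent transformers releases and requires torch:

# Minimal PyTorchBenchmark run matching the tests above.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    sequence_lengths=[8], batch_sizes=[1], multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)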
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _snake_case ( UpperCamelCase : list[list[float]] ):
UpperCAmelCase : str = Decimal
    # Check the matrix dimensions: this branch handles 2x2 matrices,
    # and the elif branch further down handles 3x3 matrices
if len(UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCAmelCase : Optional[int] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
UpperCAmelCase : Optional[Any] = [[0.0, 0.0], [0.0, 0.0]]
UpperCAmelCase , UpperCAmelCase : Any = matrix[1][1], matrix[0][0]
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCAmelCase : str = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
UpperCAmelCase : Optional[int] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCAmelCase : str = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCAmelCase : List[str] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCAmelCase : Optional[int] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCAmelCase : Optional[int] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCAmelCase : str = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCAmelCase : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCAmelCase : Dict = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCAmelCase : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCAmelCase : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
UpperCAmelCase : str = array(UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
UpperCAmelCase : Any = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
UpperCAmelCase : Tuple = array(UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(UpperCamelCase )
# Calculate the inverse of the matrix
return [[float(d(UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 359 | 0 |
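A standalone check of the 2x2 formula the function implements (the dump's own function body is too mangled to execute); inverse_2x2 below is a clean restatement of that branch, verified against numpy:

# Sanity check of the 2x2 inverse formula against numpy.linalg.inv.
import numpy as np


def inverse_2x2(m):
    det = m[0][0] * m[1][1] - m[0][1] * m[1][0]
    if det == 0:
        raise ValueError("This matrix has no inverse.")
    return [[m[1][1] / det, -m[0][1] / det], [-m[1][0] / det, m[0][0] / det]]


m = [[4.0, 7.0], [2.0, 6.0]]
print(inverse_2x2(m))              # [[0.6, -0.7], [-0.2, 0.4]]
print(np.linalg.inv(np.array(m)))  # matches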
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
lowerCamelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    max_seq_length: Optional[int] = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )


@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language: str = field(
        default=None , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language: Optional[str] = field(
        default=None , metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli" , model_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
    metric = evaluate.load("xnli" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix="predict" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("predict" , metrics )
        trainer.save_metrics("predict" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , "predictions.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
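# A hedged usage sketch (the model name, languages and output path below are
# illustrative, not taken from this file): the script is launched like the
# other example trainers, e.g.
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli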
| 191 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir( data_dir , save_dir , model_name , bs = 8 , max_source_length = 10_24 , type_path="val" , n_obs=None , fp16=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs = None , prefix="" , **generate_kwargs , ) -> Dict:
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl" , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F'rank_{local_rank}_output.json' )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task )  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams" , model.config.num_beams )  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    ds = Seq2SeqDataset(
        tokenizer , data_dir , max_source_length , max_target_length=10_24 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences )  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({"pred": pred, "id": ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
    parser.add_argument("--data_dir" , type=str , help="like cnn_dm/test.source" )
    parser.add_argument(
        "--model_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
    parser.add_argument("--save_dir" , type=str , help="where to save" , default="tmp_gen" )
    parser.add_argument("--max_source_length" , type=int , default=None )
    parser.add_argument(
        "--type_path" , type=str , default="test" , help="which subset to evaluate typically train/val/test" )
    parser.add_argument("--task" , type=str , default="summarization" , help="used for task_specific_params + metrics" )
    parser.add_argument("--bs" , type=int , default=8 , required=False , help="batch size" )
    parser.add_argument(
        "--local_rank" , type=int , default=-1 , required=False , help="should be passed by distributed.launch" )
    parser.add_argument(
        "--n_obs" , type=int , default=None , required=False , help="How many observations. Defaults to all." )
    parser.add_argument(
        "--num_return_sequences" , type=int , default=1 , required=False , help="How many sequences to return" )
    parser.add_argument(
        "--sync_timeout" , type=int , default=6_00 , required=False , help="How long should master process wait for other processes to finish." , )
    parser.add_argument("--src_lang" , type=str , default=None , required=False )
    parser.add_argument("--tgt_lang" , type=str , default=None , required=False )
    parser.add_argument(
        "--prefix" , type=str , required=False , default=None , help="will be added to the begininng of src examples" )
    parser.add_argument("--fp16" , action="store_true" )
    parser.add_argument("--debug" , action="store_true" )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F'parsed the following generate kwargs: {generate_kwargs}' )
    json_save_dir = Path(args.save_dir + "_tmp" )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json" ) )
    if intermediate_files:
        raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            pseudolabel_results_path = save_dir.joinpath("pseudolabel_results.json" )
            print(F'Saving aggregated results at {pseudolabel_results_path}, intermediate in {json_save_dir}/' )
            save_json(preds , pseudolabel_results_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + ".target" )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds , labels )
        metrics["n_obs"] = len(preds )
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"] , 4 )
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F'{args.type_path}.target' ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results( partial_results ) -> List:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node( num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish" )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json" ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
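# A hedged launch sketch (the data directory, GPU count and file name are
# illustrative assumptions): the script is meant to be started once per GPU
# through torch.distributed, e.g.
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir xsum_generations --bs 16 --fp16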
| 191 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ):
    """simple docstring"""
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash


def get_tokens( code : str ):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self , *,
        duplication_jaccard_threshold = 0.85 , ) -> None:
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )

    def add( self , code_key , min_hash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save( self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    """simple docstring"""
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter( dataset_iterator : Type[Dataset] ):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash ,ThreadedIterator(dataset_iterator ,max_queue_size=1_00_00 ) ,chunksize=1_00 ,):
            if data is not None:
                yield data


def make_duplicate_clusters( dataset_iterator : Type[Dataset] ,jaccard_threshold : float ):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) ,max_queue_size=1_00 ) ):
        di.add(filename ,min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity( code1 : str ,code2 : str ):
    """simple docstring"""
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_shared_dataset = None


def _find_cluster_extremes_shared( cluster ,jaccard_threshold ):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 ,code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes


def find_extremes( cluster_list ,dataset ,jaccard_threshold ):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared ,jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f ,cluster_list ,) ,total=len(cluster_list ) ,):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] ,jaccard_threshold : float = 0.8_5 ):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset ,jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters ,dataset ,jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x ,idx : idx not in remove_indices ,with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
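# A hedged usage sketch (the dataset name below is illustrative): given any
# 🤗 Datasets split whose rows carry "content", "repo_name" and "path" columns:
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#
# ds_dedup keeps one "extreme" representative per near-duplicate cluster.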
| 721 |
'''simple docstring'''
def solution( n : int = 1_00 ):
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 ,n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
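# A quick sanity check (plain arithmetic, not from this file): for n = 10 the
# sum 1..10 is 55 and 55**2 = 3025, the sum of squares is 385, so
# solution(10) returns 3025 - 385 = 2640.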
| 163 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 626 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid = "isbn/0140328726" ):
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = F'{olid} is not a valid Open Library olid'
        raise ValueError(msg )
    return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()


def summarize_book( ol_book_data ):
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
        book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
        print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 626 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline(ChunkPipeline ):
    '''simple docstring'''
def __init__( self , **__lowercase) -> Optional[int]:
super().__init__(**__lowercase)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters( self , **kwargs) -> Optional[Any]:
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs) -> Tuple:
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs)
    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1_500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ) -> List[str]:
        image = load_image(image)
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images , return_tensors='''pt''')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
                    model_inputs['''image_embeddings'''] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''')
        for i in range(0 , n_points , points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ) -> Optional[Any]:
        input_boxes = model_inputs.pop('''input_boxes''')
        is_last = model_inputs.pop('''is_last''')
        original_sizes = model_inputs.pop('''original_sizes''').tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False)
        iou_scores = model_outputs['''iou_scores''']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ) -> Union[str, Any]:
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores'''))
            all_masks.extend(model_output.pop('''masks'''))
            all_boxes.append(model_output.pop('''boxes'''))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional['''rle_mask'''] = rle_mask
        if output_bboxes_mask:
            optional['''bounding_boxes'''] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
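# A hedged usage sketch (the checkpoint and image URL are illustrative, not
# taken from this file):
#
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#   outputs = generator(
#       "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
#   )
#   masks, scores = outputs["masks"], outputs["scores"]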
| 452 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self) -> None:
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''').input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''').input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id)
        logits = model(input_ids , decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
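# A hedged note on running this test (the file path is an assumption about
# where it would live in a transformers checkout):
#
#   RUN_SLOW=1 pytest tests/models/mt5/test_modeling_flax_mt5.py -k integration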
| 452 | 1 |
def count_divisors( n : int ):
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution( ):
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
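# A quick sanity check (textbook divisor counting, not from this file):
# count_divisors(28) returns 6, matching the divisors 1, 2, 4, 7, 14 and 28;
# solution() itself finds the first triangle number with over 500 divisors.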
| 175 |
from __future__ import annotations
def check_polygon( nums : list[float] ):
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
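# A quick sanity check (ordinary triangle-inequality arithmetic, not from this
# file): check_polygon([3, 4, 5]) is True since 5 < 3 + 4, while
# check_polygon([1, 1, 3]) is False since 3 >= 1 + 1.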
| 175 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="""vision-to-text modeling""")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
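# A hedged usage sketch (the checkpoint name is illustrative): any of the auto
# classes above resolves a config to its Flax implementation, e.g.
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")   # -> FlaxBertModel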
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
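# A hedged usage note: once this lazy module is registered, the tokenizer can
# be imported without eagerly loading its implementation, e.g.
#
#   from transformers import TapexTokenizer   # resolved on first attribute access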
| 675 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer( pl.LightningModule ):
    def __init__( self , hparams : argparse.Namespace , num_labels=None , mode : str = "base" , config=None , tokenizer=None , model=None , **config_kwargs , ) -> None:
        '''simple docstring'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
def A ( self : List[Any] , *_a : Optional[int] , **_a : int ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_type.from_pretrained(*_a , **_a )
def A ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =arg_to_scheduler[self.hparams.lr_scheduler]
_SCREAMING_SNAKE_CASE =get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_SCREAMING_SNAKE_CASE ={'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def A ( self : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model
_SCREAMING_SNAKE_CASE =['bias', 'LayerNorm.weight']
_SCREAMING_SNAKE_CASE =[
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
_SCREAMING_SNAKE_CASE =Adafactor(
_a , lr=self.hparams.learning_rate , scale_parameter=_a , relative_step=_a )
else:
_SCREAMING_SNAKE_CASE =AdamW(
_a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_SCREAMING_SNAKE_CASE =optimizer
_SCREAMING_SNAKE_CASE =self.get_lr_scheduler()
return [optimizer], [scheduler]
def A ( self : Tuple , _a : Dict , _a : List[str] ) -> str:
'''simple docstring'''
return self.validation_step(_a , _a )
def A ( self : Dict , _a : str ) -> Dict:
'''simple docstring'''
return self.validation_end(_a )
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_SCREAMING_SNAKE_CASE =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A ( self : int , _a : Any ) -> Union[str, Any]:
'''simple docstring'''
if stage == "test":
_SCREAMING_SNAKE_CASE =len(self.test_dataloader().dataset )
else:
_SCREAMING_SNAKE_CASE =self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_a )
_SCREAMING_SNAKE_CASE =len(self.train_dataloader().dataset )
def A ( self : Union[str, Any] , _a : str , _a : int , _a : bool = False ) -> Optional[int]:
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def A ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.train_loader
def A ( self : str ) -> str:
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_a )
def A ( self : Tuple ) -> Any:
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_a )
def A ( self : int , _a : Dict ) -> Optional[Any]:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_a , list(filter(_a , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A ( self : Optional[Any] , _a : Dict[str, Any] ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.output_dir.joinpath('best_tfmr' )
_SCREAMING_SNAKE_CASE =self.step_count
self.model.save_pretrained(_a )
self.tokenizer.save_pretrained(_a )
@staticmethod
def A ( _a : Any , _a : Any ) -> List[str]:
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=_a , type=_a , required=_a , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_a , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_a , type=_a , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_a ).parent / 'test_run' / 'cache' ) , type=_a , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_a , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_a , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_a , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_a , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=_a , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_a , metavar=_a , type=_a , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_a , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_a , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_a , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_a )
parser.add_argument('--train_batch_size' , default=32 , type=_a )
parser.add_argument('--eval_batch_size' , default=32 , type=_a )
parser.add_argument('--adafactor' , action='store_true' )
class A__ ( pl.Callback ):
def A ( self : Tuple , _a : str , _a : int ) -> Dict:
'''simple docstring'''
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class A__ ( pl.Callback ):
def A ( self : Tuple , _a : str , _a : Tuple ) -> Tuple:
'''simple docstring'''
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_a )
class A__ ( pl.Callback ):
def A ( self : List[str] , _a : Tuple , _a : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =trainer.lr_schedulers[0]['scheduler']
_SCREAMING_SNAKE_CASE ={f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_a )
def A ( self : List[Any] , _a : pl.Trainer , _a : pl.LightningModule ) -> Optional[int]:
'''simple docstring'''
rank_zero_info('***** Validation results *****' )
_SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log results
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_a , str(metrics[key] ) ) )
def A ( self : Optional[Any] , _a : pl.Trainer , _a : pl.LightningModule ) -> List[str]:
'''simple docstring'''
rank_zero_info('***** Test results *****' )
_SCREAMING_SNAKE_CASE =trainer.callback_metrics
# Log and save results to file
_SCREAMING_SNAKE_CASE =os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_a , 'w' ) as writer:
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_a , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_a , str(metrics[key] ) ) )
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> None:
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(_UpperCamelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=_UpperCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_UpperCamelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_UpperCamelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_UpperCamelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_UpperCamelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=_UpperCamelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(_UpperCamelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=_UpperCamelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def _lowerCAmelCase ( _UpperCamelCase : BaseTransformer , _UpperCamelCase : argparse.Namespace , _UpperCamelCase : Any=None , _UpperCamelCase : str=True , _UpperCamelCase : Any=[] , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str=None , **_UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
_SCREAMING_SNAKE_CASE =Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_UpperCamelCase )
# add custom checkpoints
if checkpoint_callback is None:
_SCREAMING_SNAKE_CASE =pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_UpperCamelCase )
if logging_callback is None:
_SCREAMING_SNAKE_CASE =LoggingCallback()
_SCREAMING_SNAKE_CASE ={}
    if args.fp16:
_SCREAMING_SNAKE_CASE =16
if args.gpus > 1:
_SCREAMING_SNAKE_CASE ='auto'
_SCREAMING_SNAKE_CASE ='ddp'
_SCREAMING_SNAKE_CASE =args.accumulate_grad_batches
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE ='auto'
_SCREAMING_SNAKE_CASE =pl.Trainer.from_argparse_args(
_UpperCamelCase , weights_summary=_UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **_UpperCamelCase , )
if args.do_train:
trainer.fit(_UpperCamelCase )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer
| 405 |
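# Illustrative sketch (an aside, not one of the dataset rows): the optimizer
# setup above splits parameters into a weight-decayed group and a no-decay
# group (biases and LayerNorm weights). A minimal PyTorch version of that
# grouping; the module and hyperparameters here are made up:
import torch
from torch import nn

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = TinyModel()
no_decay = ("bias", "LayerNorm.weight")  # name fragments, as in the code above
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)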
'''simple docstring'''
from __future__ import annotations
class A__ :
    def __init__( self , order : int ) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ) -> None:
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_message = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs )}"
            )
            raise ValueError(error_message )
        if len(b_coeffs ) != self.order + 1:
            error_message = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs )}"
            )
            raise ValueError(error_message )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample : float ) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 405 | 1 |
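# Illustrative usage of the IIR filter above: a first-order smoothing filter
# y[n] = 0.5 * x[n] + 0.5 * y[n-1] driven with a unit step. The coefficients
# here are made up for the demonstration:
low_pass = A__(order=1)
low_pass.set_coefficients(a_coeffs=[1.0, -0.5], b_coeffs=[0.5, 0.0])
print([round(low_pass.process(1.0), 4) for _ in range(4)])  # [0.5, 0.75, 0.875, 0.9375]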
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = 'RegNetConfig'
# Base docstring
_lowerCamelCase = 'facebook/regnet-y-040'
_lowerCamelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCamelCase = 'facebook/regnet-y-040'
_lowerCamelCase = 'tabby, tabby cat'
_lowerCamelCase = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ = 3 , a__ = 1 , a__ = 1 , a__ = "relu" , **a__ , ):
"""simple docstring"""
super().__init__(**a__)
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCamelCase : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
_lowerCamelCase : List[str] = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=a__ , strides=a__ , padding='''VALID''' , groups=a__ , use_bias=a__ , name='''convolution''' , )
_lowerCamelCase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''')
_lowerCamelCase : Tuple = ACTaFN[activation] if activation is not None else tf.identity
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.convolution(self.padding(a__))
_lowerCamelCase : Any = self.normalization(a__)
_lowerCamelCase : Any = self.activation(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[int] = config.num_channels
_lowerCamelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : int = shape_list(a__)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCamelCase : List[str] = tf.transpose(a__ , perm=(0, 2, 3, 1))
_lowerCamelCase : List[Any] = self.embedder(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ = 2 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Dict = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=1 , strides=a__ , use_bias=a__ , name='''convolution''')
_lowerCamelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''')
def __snake_case ( self , a__ , a__ = False):
"""simple docstring"""
return self.normalization(self.convolution(a__) , training=a__)
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='''pooler''')
_lowerCamelCase : str = [
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='''relu''' , name='''attention.0'''),
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2'''),
]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Tuple = self.pooler(a__)
for layer_module in self.attention:
_lowerCamelCase : Optional[Any] = layer_module(a__)
_lowerCamelCase : List[str] = hidden_state * pooled
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[int] = in_channels != out_channels or stride != 1
_lowerCamelCase : Optional[int] = max(1 , out_channels // config.groups_width)
_lowerCamelCase : int = (
TFRegNetShortCut(a__ , stride=a__ , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCamelCase : List[Any] = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='''layer.1'''),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='''layer.2'''),
]
_lowerCamelCase : Dict = ACTaFN[config.hidden_act]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = hidden_state
for layer_module in self.layers:
_lowerCamelCase : Optional[Any] = layer_module(a__)
_lowerCamelCase : int = self.shortcut(a__)
hidden_state += residual
_lowerCamelCase : Union[str, Any] = self.activation(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 1 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Dict = in_channels != out_channels or stride != 1
_lowerCamelCase : str = max(1 , out_channels // config.groups_width)
_lowerCamelCase : List[str] = (
TFRegNetShortCut(a__ , stride=a__ , name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''')
)
_lowerCamelCase : List[Any] = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='''layer.0'''),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='''layer.1'''),
TFRegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4)) , name='''layer.2'''),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='''layer.3'''),
]
_lowerCamelCase : int = ACTaFN[config.hidden_act]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : List[str] = hidden_state
for layer_module in self.layers:
_lowerCamelCase : str = layer_module(a__)
_lowerCamelCase : str = self.shortcut(a__)
hidden_state += residual
_lowerCamelCase : Optional[Any] = self.activation(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , a__ , a__ , a__ = 2 , a__ = 2 , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : int = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_lowerCamelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(a__ , a__ , a__ , stride=a__ , name='''layers.0'''),
*[layer(a__ , a__ , a__ , name=F"""layers.{i+1}""") for i in range(depth - 1)],
]
def __snake_case ( self , a__):
"""simple docstring"""
for layer_module in self.layers:
_lowerCamelCase : Optional[Any] = layer_module(a__)
return hidden_state
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : Optional[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ))
_lowerCamelCase : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(a__ , config.depths[1:])):
self.stages.append(TFRegNetStage(a__ , a__ , a__ , depth=a__ , name=F"""stages.{i+1}"""))
def __snake_case ( self , a__ , a__ = False , a__ = True):
"""simple docstring"""
_lowerCamelCase : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCamelCase : str = hidden_states + (hidden_state,)
_lowerCamelCase : str = stage_module(a__)
if output_hidden_states:
_lowerCamelCase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__)
@keras_serializable
class __A ( tf.keras.layers.Layer ):
"""simple docstring"""
UpperCAmelCase__ = RegNetConfig
def __init__( self , a__ , **a__):
"""simple docstring"""
super().__init__(**a__)
_lowerCamelCase : int = config
_lowerCamelCase : Tuple = TFRegNetEmbeddings(a__ , name='''embedder''')
_lowerCamelCase : str = TFRegNetEncoder(a__ , name='''encoder''')
_lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='''pooler''')
@unpack_inputs
def __snake_case ( self , a__ , a__ = None , a__ = None , a__ = False , ):
"""simple docstring"""
_lowerCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Optional[int] = self.embedder(a__ , training=a__)
_lowerCamelCase : Optional[int] = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__)
_lowerCamelCase : Dict = encoder_outputs[0]
_lowerCamelCase : Union[str, Any] = self.pooler(a__)
        # Change to NCHW output format to have uniformity in the modules
_lowerCamelCase : List[str] = tf.transpose(a__ , perm=(0, 3, 1, 2))
_lowerCamelCase : Optional[int] = tf.transpose(a__ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCamelCase : Tuple = tuple([tf.transpose(a__ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = RegNetConfig
UpperCAmelCase__ = """regnet"""
UpperCAmelCase__ = """pixel_values"""
@property
def __snake_case ( self):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa)}
_lowerCamelCase = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowerCamelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" ,lowerCamelCase__ ,)
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , a__ , *a__ , **a__):
"""simple docstring"""
super().__init__(a__ , *a__ , **a__)
_lowerCamelCase : str = TFRegNetMainLayer(a__ , name='''regnet''')
@unpack_inputs
@add_start_docstrings_to_model_forward(a__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __snake_case ( self , a__ , a__ = None , a__ = None , a__=False , ):
"""simple docstring"""
_lowerCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Tuple = self.regnet(
pixel_values=a__ , output_hidden_states=a__ , return_dict=a__ , training=a__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,lowerCamelCase__ ,)
class __A ( lowerCamelCase__ ,lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , a__ , *a__ , **a__):
"""simple docstring"""
super().__init__(a__ , *a__ , **a__)
_lowerCamelCase : Dict = config.num_labels
_lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(a__ , name='''regnet''')
# classification head
_lowerCamelCase : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __snake_case ( self , a__ = None , a__ = None , a__ = None , a__ = None , a__=False , ):
"""simple docstring"""
_lowerCamelCase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Union[str, Any] = self.regnet(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__)
_lowerCamelCase : Any = outputs.pooler_output if return_dict else outputs[1]
_lowerCamelCase : Union[str, Any] = self.classifier[0](a__)
_lowerCamelCase : str = self.classifier[1](a__)
_lowerCamelCase : List[Any] = None if labels is None else self.hf_compute_loss(labels=a__ , logits=a__)
if not return_dict:
_lowerCamelCase : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states)
| 706 |
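# Illustrative sketch (an aside, not one of the dataset rows): the TF port
# above shuttles tensors between PyTorch-style NCHW and Keras-style NHWC with
# tf.transpose. The two permutations on a made-up batch:
import tensorflow as tf

nchw = tf.random.normal((2, 3, 8, 8))         # (batch, channels, height, width)
nhwc = tf.transpose(nchw, perm=(0, 2, 3, 1))  # -> (batch, height, width, channels)
back = tf.transpose(nhwc, perm=(0, 3, 1, 2))  # -> back to (batch, channels, height, width)
assert back.shape == nchw.shape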
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
super().tearDown()
gc.collect()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Tuple = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=a__ , dtype=jnp.bfloataa)
_lowerCamelCase, _lowerCamelCase : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa)
_lowerCamelCase : Union[str, Any] = controlnet_params
_lowerCamelCase : str = '''bird'''
_lowerCamelCase : Union[str, Any] = jax.device_count()
_lowerCamelCase : Dict = pipe.prepare_text_inputs([prompts] * num_samples)
_lowerCamelCase : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''')
_lowerCamelCase : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples)
_lowerCamelCase : Union[str, Any] = jax.random.PRNGKey(0)
_lowerCamelCase : Tuple = jax.random.split(a__ , jax.device_count())
_lowerCamelCase : Dict = replicate(a__)
_lowerCamelCase : Tuple = shard(a__)
_lowerCamelCase : Union[str, Any] = shard(a__)
_lowerCamelCase : Optional[Any] = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_lowerCamelCase : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
_lowerCamelCase : List[Any] = images[0, 253:256, 253:256, -1]
_lowerCamelCase : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten()))
_lowerCamelCase : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(F"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Any = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=a__ , dtype=jnp.bfloataa)
_lowerCamelCase, _lowerCamelCase : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa)
_lowerCamelCase : Optional[Any] = controlnet_params
_lowerCamelCase : Optional[Any] = '''Chef in the kitchen'''
_lowerCamelCase : int = jax.device_count()
_lowerCamelCase : Dict = pipe.prepare_text_inputs([prompts] * num_samples)
_lowerCamelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''')
_lowerCamelCase : str = pipe.prepare_image_inputs([pose_image] * num_samples)
_lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0)
_lowerCamelCase : List[str] = jax.random.split(a__ , jax.device_count())
_lowerCamelCase : str = replicate(a__)
_lowerCamelCase : Union[str, Any] = shard(a__)
_lowerCamelCase : List[Any] = shard(a__)
_lowerCamelCase : Any = pipe(
prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_lowerCamelCase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
_lowerCamelCase : Dict = images[0, 253:256, 253:256, -1]
_lowerCamelCase : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten()))
_lowerCamelCase : Optional[int] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(F"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 613 | 0 |
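# Illustrative sketch (an aside, not one of the dataset rows): the Flax test
# above fans one PRNG key out to every device and shards the batch the same
# way before the jitted call. The core pattern, assuming flax is installed;
# the shapes here are made up:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

rng = jax.random.PRNGKey(0)
device_rngs = jax.random.split(rng, jax.device_count())  # one key per device
params = replicate({"w": jnp.ones((8,))})                # copy params to every device
batch = jnp.zeros((jax.device_count() * 4, 8))           # leading axis divisible by device count
sharded_batch = shard(batch)                             # adds a leading device axis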
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaPipeline
snake_case_ = [
'image_embeds',
'negative_image_embeds',
]
snake_case_ = ['image_embeds', 'negative_image_embeds']
snake_case_ = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case_ = False
@property
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return 32
@property
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return 32
@property
def _UpperCamelCase ( self : int ):
"""simple docstring"""
return self.time_input_dim
@property
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return 1_00
@property
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCamelCase__ = UNetaDConditionModel(**a_ )
return model
@property
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = self.dummy_unet
lowerCamelCase__ = self.dummy_movq
lowerCamelCase__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=a_ , set_alpha_to_one=a_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=a_ , )
lowerCamelCase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _UpperCamelCase ( self : Any , a_ : Any , a_ : List[str]=0 ):
"""simple docstring"""
lowerCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a_ ) ).to(a_ )
lowerCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
a_ )
if str(a_ ).startswith("""mps""" ):
lowerCamelCase__ = torch.manual_seed(a_ )
else:
lowerCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCamelCase__ = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = """cpu"""
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**a_ )
lowerCamelCase__ = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCamelCase__ = pipe(**self.get_dummy_inputs(a_ ) )
lowerCamelCase__ = output.images
lowerCamelCase__ = pipe(
**self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
lowerCamelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a_ )
lowerCamelCase__ = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
lowerCamelCase__ = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
lowerCamelCase__ = """red cat, 4k photo"""
lowerCamelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ = pipe_prior(
a_ , generator=a_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCamelCase__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowerCamelCase__ = pipeline(
image_embeds=a_ , negative_image_embeds=a_ , generator=a_ , num_inference_steps=1_00 , output_type="""np""" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(a_ , a_ )
| 165 |
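# Illustrative sketch (an aside, not one of the dataset rows): the pipeline
# tests above pin randomness with seeded torch Generators so repeated runs
# produce identical latents. The mechanism in isolation:
import torch

generator_a = torch.Generator().manual_seed(0)
generator_b = torch.Generator().manual_seed(0)
assert torch.equal(
    torch.randn(2, 3, generator=generator_a),
    torch.randn(2, 3, generator=generator_b),
)  # same seed -> same tensor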
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
a__ : Union[str, Any] = """
Human: <<task>>
Assistant: """
a__ : Tuple = """huggingface-tools/default-prompts"""
a__ : Any = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def snake_case (prompt_or_repo_id=None , agent_name=None , mode="run" ):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read()
| 165 | 1 |
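# Illustrative sketch (an aside, not one of the dataset rows): the prompt
# loader above treats any string containing whitespace as a literal prompt and
# anything else as a Hub repo id. The detection step in isolation:
import re

def looks_like_repo_id(prompt_or_repo_id: str) -> bool:
    # repo ids such as "huggingface-tools/default-prompts" contain no whitespace
    return re.search(r"\s", prompt_or_repo_id) is None

assert looks_like_repo_id("huggingface-tools/default-prompts")
assert not looks_like_repo_id("Summarize this text for me")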
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : Optional[Any] = BioGptTokenizer
snake_case : int = False
def snake_case_ (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase : Any = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_UpperCAmelCase : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
_UpperCAmelCase : Optional[int] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
_UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase__ ) )
def snake_case_ (self , lowerCAmelCase__ ):
_UpperCAmelCase : str = """lower newer"""
_UpperCAmelCase : Union[str, Any] = """lower newer"""
return input_text, output_text
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = BioGptTokenizer(self.vocab_file , self.merges_file )
_UpperCAmelCase : Any = """lower"""
_UpperCAmelCase : Optional[Any] = ["""low""", """er</w>"""]
_UpperCAmelCase : int = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = tokens + ["""<unk>"""]
_UpperCAmelCase : int = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
@slow
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
_UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 721 |
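# Illustrative sketch (an aside, not one of the dataset rows): the tokenizer
# test above builds a throwaway BPE vocabulary in which "lower" segments into
# ["low", "er</w>"]. Writing the same style of fixture files by hand; the
# contents mirror the test, the paths are made up:
import json
import os
import tempfile

vocab = {token: index for index, token in enumerate(["l", "o", "w", "e", "r", "lo", "low", "er</w>", "<unk>"])}
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w") as fp:
    json.dump(vocab, fp)
with open(os.path.join(tmpdir, "merges.txt"), "w") as fp:
    fp.write("\n".join(merges))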
'''simple docstring'''
def min_path_sum( grid ):
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row , row_above ):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 | 0 |
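# Illustrative usage of the dynamic-programming minimum path sum above
# (moves are restricted to right and down):
#   1 3 1
#   1 5 1   -> cheapest path 1 -> 3 -> 1 -> 1 -> 1 costs 7
#   4 2 1
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7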
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__a: Tuple = '''sshleifer/bart-tiny-random'''
__a: Any = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(lowerCamelCase )
def lowerCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(lowerCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(lowerCamelCase , tempfile.mkdtemp() , e=1 , d=lowerCamelCase )
def lowerCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(lowerCamelCase , tempfile.mkdtemp() , e=1 , d=lowerCamelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , *_UpperCAmelCase = create_student_by_copying_alternating_layers(lowerCamelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
with self.assertRaises(lowerCamelCase ):
            create_student_by_copying_alternating_layers(lowerCamelCase , tempfile.mkdtemp() , e=lowerCamelCase , d=lowerCamelCase )
| 108 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCamelCase : List[Any] = logging.getLogger(__name__)
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , lowercase__=-1 ):
'''simple docstring'''
__A =label_idx
def __UpperCamelCase ( self , lowercase__ , lowercase__ ):
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
__A =mode.value
__A =os.path.join(lowercase__ , f'''{mode}.txt''' )
__A =1
__A =[]
with open(lowercase__ , encoding='''utf-8''' ) as f:
__A =[]
__A =[]
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
__A =[]
__A =[]
else:
__A =line.split(''' ''' )
words.append(splits[0] )
if len(lowercase__ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) )
return examples
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(lowercase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__A =line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(lowercase__ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
if path:
with open(lowercase__ , '''r''' ) as f:
__A =f.read().splitlines()
if "O" not in labels:
__A =['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
if path:
with open(lowercase__ , '''r''' ) as f:
__A =f.read().splitlines()
if "O" not in labels:
__A =['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
def __UpperCamelCase ( self , lowercase__ , lowercase__ ):
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
__A =mode.value
__A =os.path.join(lowercase__ , f'''{mode}.txt''' )
__A =1
__A =[]
with open(lowercase__ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(lowercase__ ):
__A =[]
__A =[]
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(lowercase__ ) == len(lowercase__ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase__ , labels=lowercase__ ) )
guid_index += 1
return examples
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =0
for sentence in parse_incr(lowercase__ ):
__A =preds_list[example_id]
__A =''''''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowercase__ )
example_id += 1
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
if path:
with open(lowercase__ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 184 | 0 |
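# Illustrative sketch (an aside, not one of the dataset rows): the readers
# above parse CoNLL-style files in which each line holds "token ... label" and
# blank lines separate sentences. The parsing step on an in-memory example:
sample = "EU B-ORG\nrejects O\n\nGerman B-MISC\ncall O\n"

sentences, words, labels = [], [], []
for line in sample.splitlines():
    if not line.strip():  # blank line marks a sentence boundary
        if words:
            sentences.append((words, labels))
            words, labels = [], []
        continue
    token, label = line.split()
    words.append(token)
    labels.append(label)
if words:  # flush the trailing sentence
    sentences.append((words, labels))

assert sentences[0] == (["EU", "rejects"], ["B-ORG", "O"])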
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ : Optional[int] = logging.getLogger(__name__)
UpperCAmelCase__ : Optional[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(a__ )} , )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__UpperCamelCase : bool = field(
default=a__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__UpperCamelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__UpperCamelCase : bool = field(
default=a__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCamelCase : Optional[str] = field(default=a__ , metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
__UpperCamelCase : bool = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__UpperCamelCase : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
__UpperCamelCase : Optional[int] = field(
default=a__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
__UpperCamelCase : Optional[int] = field(
default=a__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__UpperCamelCase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCamelCase : bool = field(
default=a__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def __magic_name__ (self ) -> str:
"""simple docstring"""
if self.train_file is not None:
SCREAMING_SNAKE_CASE__ : str = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
SCREAMING_SNAKE_CASE__ : str = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowercase_ ( _snake_case ,_snake_case ):
with open(_snake_case ,"""r""" ,encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [json.loads(_snake_case ) for line in f.read().splitlines() if (len(_snake_case ) > 0 and not line.isspace())]
assert len(_snake_case ) == len(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = {c: dataset[c] for c in dataset.column_names}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = refs
return Dataset.from_dict(_snake_case )
def lowercase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,_snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset(data_args.dataset_name ,data_args.dataset_config_name )
if "validation" not in datasets.keys():
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'''train[:{data_args.validation_split_percentage}%]''' ,)
SCREAMING_SNAKE_CASE__ : Any = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=f'''train[{data_args.validation_split_percentage}%:]''' ,)
else:
SCREAMING_SNAKE_CASE__ : int = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = data_args.validation_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
SCREAMING_SNAKE_CASE__ : int = """text"""
SCREAMING_SNAKE_CASE__ : Any = load_dataset(_snake_case ,data_files=_snake_case )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Dict = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE__ : List[str] = AutoConfig.from_pretrained(model_args.config_name ,**_snake_case )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoConfig.from_pretrained(model_args.model_name_or_path ,**_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
SCREAMING_SNAKE_CASE__ : Tuple = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,**_snake_case )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,**_snake_case )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=_snake_case ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info("""Training new model from scratch""" )
SCREAMING_SNAKE_CASE__ : Dict = AutoModelForMaskedLM.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = datasets["""train"""].column_names
else:
SCREAMING_SNAKE_CASE__ : int = datasets["""validation"""].column_names
SCREAMING_SNAKE_CASE__ : List[str] = """text""" if """text""" in column_names else column_names[0]
SCREAMING_SNAKE_CASE__ : Any = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(_snake_case ):
# Remove empty lines
SCREAMING_SNAKE_CASE__ : List[str] = [line for line in examples["""text"""] if len(_snake_case ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] ,padding=_snake_case ,truncation=_snake_case ,max_length=data_args.max_seq_length )
SCREAMING_SNAKE_CASE__ : Dict = datasets.map(
_snake_case ,batched=_snake_case ,num_proc=data_args.preprocessing_num_workers ,remove_columns=[text_column_name] ,load_from_cache_file=not data_args.overwrite_cache ,)
# Add the chinese references if provided
if data_args.train_ref_file is not None:
SCREAMING_SNAKE_CASE__ : int = add_chinese_references(tokenized_datasets["""train"""] ,data_args.train_ref_file )
if data_args.validation_ref_file is not None:
SCREAMING_SNAKE_CASE__ : List[str] = add_chinese_references(
tokenized_datasets["""validation"""] ,data_args.validation_ref_file )
    # If we have ref files, we need to keep them from being removed by the Trainer
SCREAMING_SNAKE_CASE__ : Any = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
SCREAMING_SNAKE_CASE__ : int = False
# Data collator
# This one will take care of randomly masking the tokens.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=_snake_case ,mlm_probability=data_args.mlm_probability )
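    # Whole-word masking masks every sub-token of a selected word together,
    # using the reference segmentation attached above when one was provided.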
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : Any = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None ,eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None ,tokenizer=_snake_case ,data_collator=_snake_case ,)
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE__ : str = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = trainer.train(resume_from_checkpoint=_snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE__ : str = os.path.join(training_args.output_dir ,"""train_results.txt""" )
if trainer.is_world_process_zero():
with open(_snake_case ,"""w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) )
# Evaluation
SCREAMING_SNAKE_CASE__ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE__ : List[Any] = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : str = math.exp(eval_output["""eval_loss"""] )
SCREAMING_SNAKE_CASE__ : List[str] = perplexity
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(training_args.output_dir ,"""eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(_snake_case ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def lowercase_ ( _snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 703 |
"""simple docstring"""
import baseaa
def lowercase_ ( _snake_case ):
return baseaa.baaencode(string.encode("""utf-8""" ) )
def lowercase_ ( _snake_case ):
return baseaa.baadecode(_snake_case ).decode("""utf-8""" )
if __name__ == "__main__":
UpperCAmelCase__ : Dict = 'Hello World!'
UpperCAmelCase__ : Tuple = baseaa_encode(test)
print(encoded)
UpperCAmelCase__ : Tuple = baseaa_decode(encoded)
print(decoded)
| 545 | 0 |
'''simple docstring'''
from typing import Any
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple , __UpperCamelCase: str ) -> Tuple:
__magic_name__ : Dict = data
__magic_name__ : List[Any] = None
def __repr__( self: Union[str, Any] ) -> str:
return f"""Node({self.data})"""
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = None
def __iter__( self: Dict ) -> Any:
__magic_name__ : str = self.head
while node:
yield node.data
__magic_name__ : List[str] = node.next
def __len__( self: Union[str, Any] ) -> int:
return sum(1 for _ in self )
def __repr__( self: List[Any] ) -> str:
return "->".join([str(__UpperCamelCase ) for item in self] )
def __getitem__( self: List[str] , __UpperCamelCase: Any ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self: Optional[int] , __UpperCamelCase: Tuple , __UpperCamelCase: Optional[int] ) -> None:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
__magic_name__ : List[str] = self.head
for _ in range(__UpperCamelCase ):
__magic_name__ : Tuple = current.next
__magic_name__ : Union[str, Any] = data
def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: List[str] ) -> None:
self.insert_nth(len(self ) , __UpperCamelCase )
def lowerCAmelCase__ ( self: int , __UpperCamelCase: str ) -> None:
self.insert_nth(0 , __UpperCamelCase )
def lowerCAmelCase__ ( self: Tuple , __UpperCamelCase: List[Any] , __UpperCamelCase: Tuple ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__magic_name__ : Dict = Node(__UpperCamelCase )
if self.head is None:
__magic_name__ : Union[str, Any] = new_node
elif index == 0:
__magic_name__ : str = self.head # link new_node to head
__magic_name__ : List[str] = new_node
else:
__magic_name__ : str = self.head
for _ in range(index - 1 ):
__magic_name__ : Tuple = temp.next
__magic_name__ : Optional[int] = temp.next
__magic_name__ : Optional[Any] = new_node
def lowerCAmelCase__ ( self: Optional[int] ) -> None: # print every node data
print(self )
def lowerCAmelCase__ ( self: Any ) -> Any:
return self.delete_nth(0 )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowerCAmelCase__ ( self: Union[str, Any] , __UpperCamelCase: str = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__magic_name__ : Optional[Any] = self.head # default first node
if index == 0:
__magic_name__ : Any = self.head.next
else:
__magic_name__ : Optional[int] = self.head
for _ in range(index - 1 ):
__magic_name__ : Union[str, Any] = temp.next
__magic_name__ : List[str] = temp.next
__magic_name__ : Any = temp.next.next
return delete_node.data
def lowerCAmelCase__ ( self: Dict ) -> bool:
return self.head is None
def lowerCAmelCase__ ( self: List[Any] ) -> None:
__magic_name__ : int = None
__magic_name__ : Dict = self.head
while current:
# Store the current node's next node.
__magic_name__ : Optional[int] = current.next
# Make the current node's next point backwards
__magic_name__ : Union[str, Any] = prev
# Make the previous node be the current node
__magic_name__ : Dict = current
# Make the current node the next node (to progress iteration)
__magic_name__ : Union[str, Any] = next_node
# Return prev in order to put the head at the end
__magic_name__ : str = prev
def _UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCamelCase__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCamelCase__ ) == i
linked_list.insert_nth(UpperCamelCase__ , i + 1 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCamelCase__ ) == 9
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__magic_name__ : Dict = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(-8 , 1 ) )
def _UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : str = [
-9,
100,
Node(77345112 ),
"""dlrow olleH""",
7,
5555,
0,
-192.55555,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
__magic_name__ : Optional[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(UpperCamelCase__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(UpperCamelCase__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__magic_name__ : int = linked_list.delete_head()
assert result == -9
assert (
str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__magic_name__ : Optional[int] = linked_list.delete_tail()
assert result == 12.2
assert (
str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__magic_name__ : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(UpperCamelCase__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(UpperCamelCase__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(UpperCamelCase__ )
assert (
str(UpperCamelCase__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(UpperCamelCase__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _UpperCamelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
__magic_name__ : str = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(UpperCamelCase__ )
print("\nReading/changing Node data using indexing:" )
print(F"""Element at Position 1: {linked_list[1]}""" )
__magic_name__ : Optional[int] = input("Enter New Value: " ).strip()
print("New list:" )
print(UpperCamelCase__ )
print(F"""length of linked_list is : {len(UpperCamelCase__ )}""" )
if __name__ == "__main__":
    main()
| 436 |
from math import factorial
def SCREAMING_SNAKE_CASE__ ( lowercase = 20 ) -> int:
    snake_case : Dict = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    snake_case : Dict = n // 2
return int(factorial(lowercase ) / (factorial(lowercase ) * factorial(n - k )) )
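

# Sanity checks (illustrative, computed by hand, assuming the central binomial
# interpretation above): solution(1) == C(2, 1) == 2, solution(2) == C(4, 2) == 6,
# and the default solution(20) == C(40, 20) == 137846528820.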
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
lowerCamelCase : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 587 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class _lowerCamelCase :
def __init__( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any]=1_3 , lowerCamelCase_ : Tuple=7 , lowerCamelCase_ : str=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : List[Any]=9_9 , lowerCamelCase_ : Optional[Any]=3_2 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : Optional[Any]=3_7 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Dict=5_1_2 , lowerCamelCase_ : Any=1_6 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Dict=3 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : int=None , lowerCamelCase_ : int=1_0_0_0 , ):
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[str] = seq_length
_lowercase : Union[str, Any] = is_training
_lowercase : str = use_input_mask
_lowercase : Optional[int] = use_token_type_ids
_lowercase : List[Any] = use_labels
_lowercase : Optional[int] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Dict = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : List[str] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[str] = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Optional[int] = initializer_range
_lowercase : Optional[int] = num_labels
_lowercase : int = num_choices
_lowercase : int = scope
_lowercase : int = range_bbox
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : str = bbox[i, j, 3]
_lowercase : Dict = bbox[i, j, 1]
_lowercase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : List[str] = bbox[i, j, 2]
_lowercase : int = bbox[i, j, 0]
_lowercase : Tuple = t
_lowercase : str = tf.convert_to_tensor(lowerCamelCase_ )
_lowercase : Union[str, Any] = None
if self.use_input_mask:
_lowercase : str = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : int = None
_lowercase : Union[str, Any] = None
if self.use_labels:
_lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : str = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Any ):
"""simple docstring"""
_lowercase : Tuple = TFLayoutLMModel(config=lowerCamelCase_ )
_lowercase : Tuple = model(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_lowercase : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
_lowercase : str = model(lowerCamelCase_ , lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
_lowercase : str = TFLayoutLMForMaskedLM(config=lowerCamelCase_ )
_lowercase : Union[str, Any] = model(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
_lowercase : Dict = self.num_labels
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification(config=lowerCamelCase_ )
_lowercase : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : str = TFLayoutLMForTokenClassification(config=lowerCamelCase_ )
_lowercase : Optional[Any] = model(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
_lowercase : List[Any] = TFLayoutLMForQuestionAnswering(config=lowerCamelCase_ )
_lowercase : Optional[Any] = model(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
        (_lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase) : List[str] = config_and_inputs
_lowercase : List[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_snake_case = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_snake_case = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = True
_snake_case = 10
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : Dict = TFLayoutLMModelTester(self )
_lowercase : Dict = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = TFLayoutLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Any = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Dict = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Optional[int] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class _lowerCamelCase (unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : int = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Union[str, Any] = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[int] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : List[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowerCamelCase_ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[str] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : int = outputs.loss
_lowercase : str = (2,)
self.assertEqual(loss.shape , lowerCamelCase_ )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Optional[int] = (2, 2)
self.assertEqual(logits.shape , lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_lowercase : List[Any] = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : str = model(
input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
# test the shape of the logits
_lowercase : Optional[Any] = outputs.logits
_lowercase : Tuple = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , lowerCamelCase_ )
@slow
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Optional[int] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
# test the shape of the logits
_lowercase : Optional[int] = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , lowerCamelCase_ )
self.assertEqual(outputs.end_logits.shape , lowerCamelCase_ )
| 283 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
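# Format a duration in seconds as `h:mm:ss`, or `mm:ss` when under an hour.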
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : Optional[int] = int(__UpperCAmelCase )
_lowercase , _lowercase , _lowercase : Union[str, Any] = t // 3_600, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=300 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
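# Render a list of rows (first row is the header) as a small HTML table;
# float cells are formatted with six decimal places.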
def __lowerCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
_lowercase : List[Any] = '<table border="1" class="dataframe">\n'
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowercase : Optional[Any] = F'''{elt:.6f}''' if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else str(__UpperCAmelCase )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase :
_snake_case = 5
_snake_case = 0.2
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional["NotebookTrainingTracker"] = None , lowerCamelCase_ : int = 3_0_0 , ):
"""simple docstring"""
_lowercase : int = total
_lowercase : Dict = '' if prefix is None else prefix
_lowercase : List[Any] = leave
_lowercase : Dict = parent
_lowercase : str = width
_lowercase : Any = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = None
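    # `update` throttles redraws: it always renders during the first `warmup`
    # calls, then only after `wait_for` more items (sized so roughly
    # `update_every` seconds pass between renders) or when `force_update` is set.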
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : bool = False , lowerCamelCase_ : str = None ):
"""simple docstring"""
_lowercase : Dict = value
if comment is not None:
_lowercase : List[str] = comment
if self.last_value is None:
_lowercase : List[Any] = time.time()
_lowercase : str = value
_lowercase : Any = None
_lowercase : List[Any] = self.warmup
_lowercase : Union[str, Any] = 1
self.update_bar(lowerCamelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_lowercase : Tuple = time.time()
_lowercase : List[str] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowercase : List[Any] = self.elapsed_time / (value - self.start_value)
else:
_lowercase : List[Any] = None
if value >= self.total:
_lowercase : Optional[Any] = self.total
_lowercase : str = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowercase : int = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase_ )
_lowercase : Optional[int] = value
_lowercase : Dict = current_time
if self.average_time_per_item is None:
_lowercase : Union[str, Any] = 1
else:
_lowercase : Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any]=None ):
"""simple docstring"""
_lowercase : str = ' ' * (len(str(self.total ) ) - len(str(lowerCamelCase_ ) )) + str(lowerCamelCase_ )
if self.elapsed_time is None:
_lowercase : Dict = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
_lowercase : Optional[int] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
_lowercase : int = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowercase : List[str] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple=None ):
"""simple docstring"""
super().__init__(lowerCamelCase_ )
_lowercase : Tuple = None if column_names is None else [column_names]
_lowercase : Optional[int] = None
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowercase : Dict = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
if self.inner_table is None:
_lowercase : Optional[int] = [list(values.keys() ), list(values.values() )]
else:
_lowercase : str = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase_ )
_lowercase : List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : int=3_0_0 ):
"""simple docstring"""
_lowercase : List[str] = NotebookProgressBar(lowerCamelCase_ , prefix=lowerCamelCase_ , parent=self , width=lowerCamelCase_ )
return self.child_bar
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_lowercase : Any = None
self.display()
class _lowerCamelCase (__lowerCamelCase ):
def __init__( self : Optional[Any] ):
"""simple docstring"""
_lowercase : int = None
_lowercase : Any = None
_lowercase : Optional[int] = False
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
_lowercase : Dict = 0
_lowercase : Optional[Any] = 0
_lowercase : Union[str, Any] = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_lowercase : Tuple = NotebookTrainingTracker(state.max_steps , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Dict ):
"""simple docstring"""
_lowercase : Optional[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
_lowercase : Tuple = False
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : str ):
"""simple docstring"""
if not has_length(lowerCamelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowercase : Tuple = self.training_tracker.add_child(len(lowerCamelCase_ ) )
else:
_lowercase : Optional[Any] = NotebookProgressBar(len(lowerCamelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowercase : Union[str, Any] = None
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : int=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowercase : Any = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
_lowercase : Any = state.global_step
self.training_tracker.write_line(lowerCamelCase_ )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : str=None , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
if self.training_tracker is not None:
_lowercase : List[str] = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
_lowercase : Union[str, Any] = log['loss']
break
if self.first_column == "Epoch":
_lowercase : Optional[int] = int(state.epoch )
else:
_lowercase : Optional[Any] = state.global_step
_lowercase : int = 'eval'
for k in metrics:
if k.endswith('_loss' ):
_lowercase : Tuple = re.sub(r'\_loss$' , '' , lowerCamelCase_ )
_lowercase : Optional[Any] = metrics.pop('total_flos' , lowerCamelCase_ )
_lowercase : List[str] = metrics.pop('epoch' , lowerCamelCase_ )
_lowercase : List[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''' , lowerCamelCase_ )
_lowercase : Tuple = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , lowerCamelCase_ )
_lowercase : Any = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , lowerCamelCase_ )
_lowercase : Union[str, Any] = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , lowerCamelCase_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
_lowercase : Optional[Any] = v
else:
_lowercase : Tuple = k.split('_' )
_lowercase : List[Any] = ' '.join([part.capitalize() for part in splits[1:]] )
_lowercase : List[Any] = v
self.training_tracker.write_line(lowerCamelCase_ )
self.training_tracker.remove_child()
_lowercase : Any = None
# Evaluation takes a long time so we should force the next update.
_lowercase : Dict = True
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Any ):
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCamelCase_ )
_lowercase : List[Any] = None
| 283 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
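# L2-normalise both embedding batches, then return their pairwise cosine-similarity matrix.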
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1e-12):
SCREAMING_SNAKE_CASE = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_UpperCAmelCase , axis=1) , a_min=_UpperCAmelCase)).T
SCREAMING_SNAKE_CASE = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_UpperCAmelCase , axis=1) , a_min=_UpperCAmelCase)).T
return jnp.matmul(_UpperCAmelCase , norm_emb_a.T)
class _snake_case ( nn.Module ):
_lowercase : CLIPConfig
_lowercase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = FlaxCLIPVisionModule(self.config.vision_config)
SCREAMING_SNAKE_CASE = nn.Dense(self.config.projection_dim , use_bias=a , dtype=self.dtype)
SCREAMING_SNAKE_CASE = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim))
SCREAMING_SNAKE_CASE = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,))
SCREAMING_SNAKE_CASE = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,))
def __call__( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.vision_model(a)[1]
SCREAMING_SNAKE_CASE = self.visual_projection(a)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.special_care_embeds)
SCREAMING_SNAKE_CASE = jax_cosine_distance(a , self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
        SCREAMING_SNAKE_CASE = jnp.any(special_scores > 0 , axis=1 , keepdims=True)  # keep the batch dim so the per-image adjustment broadcasts below
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE = is_special_care * 0.01
SCREAMING_SNAKE_CASE = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE = jnp.round(a , 3)
SCREAMING_SNAKE_CASE = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class _snake_case ( A__ ):
_lowercase : Optional[Any] = CLIPConfig
_lowercase : Optional[Any] = '''clip_input'''
_lowercase : Dict = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , a , a = None , a = 0 , a = jnp.floataa , a = True , **a , ) -> Dict:
if input_shape is None:
SCREAMING_SNAKE_CASE = (1, 224, 224, 3)
SCREAMING_SNAKE_CASE = self.module_class(config=a , dtype=a , **a)
super().__init__(a , a , input_shape=a , seed=a , dtype=a , _do_init=_do_init)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> FrozenDict:
# init input tensor
SCREAMING_SNAKE_CASE = jax.random.normal(a , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jax.random.split(a)
SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
SCREAMING_SNAKE_CASE = self.module.init(a , a)['params']
return random_params
def __call__( self , a , a = None , ) -> Optional[int]:
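        # pixel values arrive channels-first (NCHW); transpose to channels-last (NHWC) for Flax CLIP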
SCREAMING_SNAKE_CASE = jnp.transpose(a , (0, 2, 3, 1))
return self.module.apply(
{'params': params or self.params} , jnp.array(a , dtype=jnp.floataa) , rngs={} , )
| 73 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json()
lowerCamelCase__ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowercase ):
lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json()
lowerCamelCase__ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowercase ):
lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _A ( __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase , allow_redirects=__lowercase )
lowerCamelCase__ = result.headers["""Location"""]
lowerCamelCase__ = requests.get(__lowercase , allow_redirects=__lowercase )
lowerCamelCase__ = os.path.join(__lowercase , f"""{artifact_name}.zip""" )
with open(__lowercase , """wb""" ) as fp:
fp.write(response.content )
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = None
with zipfile.ZipFile(__lowercase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowercase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowercase ) as f:
for line in f:
lowerCamelCase__ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCamelCase__ = line[: line.index(""": """ )]
lowerCamelCase__ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
                                    # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCamelCase__ = line[len("""FAILED """ ) :]
failed_tests.append(__lowercase )
elif filename == "job_name.txt":
lowerCamelCase__ = line
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase )} for `errors` """
f"""and {len(__lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCamelCase__ = None
if job_name and job_links:
lowerCamelCase__ = job_links.get(__lowercase , __lowercase )
# A list with elements of the form (line of error, error, failed test)
lowerCamelCase__ = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase )]
return result
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = []
lowerCamelCase__ = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase ) )
return errors
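# Group the parsed log entries by error message: for each error keep its total
# count and the (failed test, error line) pairs behind it, sorted by count.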
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = Counter()
counter.update([x[1] for x in logs] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCamelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
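# Map a failed test id such as `tests/models/albert/test_modeling_albert.py::...`
# to its model folder name (`albert` here); tests outside `tests/models/` map to None.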
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCamelCase__ = test.split("""/""" )[2]
else:
lowerCamelCase__ = None
return test
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCamelCase__ = [x for x in logs if x[2] is not None]
lowerCamelCase__ = {x[2] for x in logs}
lowerCamelCase__ = {}
for test in tests:
lowerCamelCase__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCamelCase__ = sum(error_counts.values() )
if n_errors > 0:
lowerCamelCase__ = {"""count""": n_errors, """errors""": error_counts}
lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = """| no. | error | status |"""
lowerCamelCase__ = """|-:|:-|:-|"""
lowerCamelCase__ = [header, sep]
for error in reduced_by_error:
lowerCamelCase__ = reduced_by_error[error]["""count"""]
lowerCamelCase__ = f"""| {count} | {error[:100]} | |"""
lines.append(__lowercase )
return "\n".join(__lowercase )
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = """| model | no. of errors | major error | count |"""
lowerCamelCase__ = """|-:|-:|-:|-:|"""
lowerCamelCase__ = [header, sep]
for model in reduced_by_model:
lowerCamelCase__ = reduced_by_model[model]["""count"""]
lowerCamelCase__ , lowerCamelCase__ = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCamelCase__ = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(__lowercase )
return "\n".join(__lowercase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
__magic_name__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__magic_name__ = get_job_links(args.workflow_run_id, token=args.token)
__magic_name__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__magic_name__ = k.find(""" / """)
__magic_name__ = k[index + len(""" / """) :]
__magic_name__ = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__magic_name__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__magic_name__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__magic_name__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__magic_name__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 129 | 0 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod with O(log n) multiplications."""
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
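
# Added sanity check (not part of the original script): Python's built-in
# three-argument pow() performs the same modular exponentiation, so the
# two implementations should always agree.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)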
| 457 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 457 | 1 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
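

# Illustrative note (added; not part of the original file): classes built on
# DummyObject act as import-time placeholders. Instantiating one without the
# required backends installed raises an ImportError, e.g.:
if __name__ == "__main__":
    try:
        DPMSolverSDEScheduler()
    except ImportError as err:
        print(f"Backend check failed as expected: {err}")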
| 455 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
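    # Added sanity check (not in the original): Project Euler problem 46 is
    # known to have the answer 5777, the smallest odd composite that cannot
    # be written as a prime plus twice a square.
    assert solution() == 5777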
| 377 | 0 |
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into `b` by capitalizing some of
    its lowercase letters and deleting all remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
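    # Added usage example (not in the original): "daBcd" can be abbreviated
    # to "ABC" by capitalizing 'a' and 'c' and deleting the remaining
    # lowercase letters, while "dBcd" cannot.
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False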
| 720 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Apply the sigmoid activation element-wise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 569 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 339 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 339 | 1 |
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
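

# Added usage example (not part of the original module): 4 is the modular
# inverse of 3 modulo 11, since (3 * 4) % 11 == 1.
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4
    print(mod_inverse(3, 11))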
| 322 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 322 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 86 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'

_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'

_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ), codebase_urls=[], reference_urls=[], format='numpy' if self.config_name != 'cvit-mkb-clsr' else None, )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 320 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase__ =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def couloumbs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
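    # Added worked example (not in the original): two 1 C charges 1 m apart
    # repel with a force equal to Coulomb's constant, about 8.988e9 N.
    print(couloumbs_law(force=0, charge1=1, charge2=1, distance=1))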
| 690 | 0 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
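    # Added usage example (not in the original): count letters and compute
    # the ETAOIN match score for a short English sentence.
    assert get_letter_count("Hello")["L"] == 2
    print(english_freq_match_score("The quick brown fox jumps over the lazy dog"))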
| 312 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 312 | 1 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 692 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file
):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 692 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n]
        """
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
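

# Added demo (not in the original): a pass-through "filter" has a flat 0 dB
# magnitude response, which makes it a quick sanity check for the plotting
# helpers above.
if __name__ == "__main__":
    class PassThrough:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThrough(), samplerate=48_000)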
| 651 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 651 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
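

# Illustrative note (added; not part of the original file): the
# `retrieval_workers` passed to RagRayDistributedRetriever are Ray actor
# handles. In the RAG fine-tuning example they are typically created along
# these lines, where `model_name` and `num_workers` are placeholders:
#
#     workers = [ray.remote(RayRetriever).remote() for _ in range(num_workers)]
#     retriever = RagRayDistributedRetriever.from_pretrained(model_name, workers)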
| 468 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot for the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst.
    Assumes the elements of lst are distinct.
    """
    # pick a pivot and separate into lists based on it
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
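    # Added usage example (not in the original): the 3rd smallest element
    # of [2, 1, 3, 4, 5] is 3.
    assert kth_number([2, 1, 3, 4, 5], 3) == 3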
| 468 | 1 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[Any] = UNetaDModel
UpperCamelCase : Dict = 'sample'
@property
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =4
SCREAMING_SNAKE_CASE_: Any =4
SCREAMING_SNAKE_CASE_: Optional[int] =(32, 32)
SCREAMING_SNAKE_CASE_: str =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =torch.tensor([10] ).to(lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
return (4, 32, 32)
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
return (4, 32, 32)
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ={
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
SCREAMING_SNAKE_CASE_: str =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCAmelCase )
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCAmelCase )
model_accelerate.to(lowerCAmelCase )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_: Tuple =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_: List[str] =noise.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =model_accelerate(lowerCAmelCase , lowerCAmelCase )["""sample"""]
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCAmelCase , low_cpu_mem_usage=lowerCAmelCase )
model_normal_load.to(lowerCAmelCase )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_: Dict =model_normal_load(lowerCAmelCase , lowerCAmelCase )["""sample"""]
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_: int =noise.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_: Optional[int] =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_: Dict =torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 ) )
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Any = UNetaDModel
UpperCamelCase : Optional[int] = 'sample'
@property
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Any=(32, 32) ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =4
SCREAMING_SNAKE_CASE_: List[Any] =3
SCREAMING_SNAKE_CASE_: Any =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
return (3, 32, 32)
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ={
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
SCREAMING_SNAKE_CASE_: Tuple =self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.dummy_input
SCREAMING_SNAKE_CASE_: int =floats_tensor((4, 3) + (256, 256) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =noise
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(**lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =4
SCREAMING_SNAKE_CASE_: Any =3
SCREAMING_SNAKE_CASE_: int =(256, 256)
SCREAMING_SNAKE_CASE_: Any =torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Tuple =model(lowerCAmelCase , lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_: Dict =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =4
SCREAMING_SNAKE_CASE_: Union[str, Any] =3
SCREAMING_SNAKE_CASE_: List[str] =(32, 32)
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_: List[Any] =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
pass
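# --- Editor's illustration (added; not part of the original test file) ---------
# A minimal, self-contained sketch of the output-slice comparison pattern used
# throughout the tests above: a fixed corner of the output tensor is flattened
# and checked against a recorded reference with a relative tolerance. The
# tensors below are placeholders; only the comparison mechanics are shown.
if __name__ == "__main__":
    import torch

    output = torch.arange(1 * 3 * 8 * 8, dtype=torch.float32).reshape(1, 3, 8, 8)
    output_slice = output[0, -1, -3:, -3:].flatten().cpu()
    expected_slice = output_slice.clone()  # stand-in for a hard-coded reference
    assert torch.allclose(output_slice, expected_slice, rtol=1e-3)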
| 409 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Any = StableUnCLIPImgaImgPipeline
UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[int] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase : str = frozenset([] )
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE_: Tuple =CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: int =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =AutoencoderKL()
SCREAMING_SNAKE_CASE_: List[str] ={
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Union[str, Any]=True ) -> List[str]:
'''simple docstring'''
if str(lowerCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE_: Optional[int] =torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: List[str] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if pil_image:
SCREAMING_SNAKE_CASE_: Optional[Any] =input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_: Optional[Any] =input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE_: Any =DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str =StableUnCLIPImgaImgPipeline(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.get_dummy_inputs(lowerCAmelCase )
inputs.update({"""image_embeds""": None} )
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe(**lowerCAmelCase ).images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_: List[Any] =np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
SCREAMING_SNAKE_CASE_: Any =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Optional[int] =torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Optional[int] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
SCREAMING_SNAKE_CASE_: List[str] =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: str =pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" )
SCREAMING_SNAKE_CASE_: List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_: Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_: str =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: int =pipe(
lowerCAmelCase , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: str =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
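# --- Editor's illustration (added; not part of the original test file) ---------
# The test above bounds peak GPU memory after enabling attention slicing and
# sequential CPU offload. A hedged, generic version of the measurement pattern
# (the matmul below is a placeholder workload, not the pipeline itself):
if __name__ == "__main__":
    import torch

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()
        x = torch.randn(1024, 1024, device="cuda")
        y = x @ x  # placeholder workload
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 7 * 10**9, f"peak GPU memory too high: {mem_bytes} bytes"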
| 409 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every rank except the local main process.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
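# --- Editor's illustration (added; not part of the original file) --------------
# Minimal usage of the wrapper above: with `main_process_only=True` (the
# default) the bar renders only on the local main process; other ranks pass
# `disable=True` through to tqdm. Assumes an `accelerate`-configured runtime so
# that `PartialState()` can be constructed.
if __name__ == "__main__":
    for _ in tqdm(True, range(10), desc="steps"):
        pass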
| 348 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_lowerCAmelCase = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class UpperCamelCase (unittest.TestCase ):
def __snake_case ( self :str , __magic_name__ :Path , __magic_name__ :Union[str, None] = None , __magic_name__ :Union[List[str], None] = None , __magic_name__ :Union[str, List[str], None] = None , __magic_name__ :bool = True , ) ->Optional[Any]:
lowercase : Dict = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
lowercase : Tuple = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
lowercase : List[str] = [file for file in files if n_ not in file]
else:
lowercase : str = [file for file in files if n_identifier not in file]
lowercase : List[str] = ignore_files or []
ignore_files.append("""__init__.py""" )
lowercase : Tuple = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
lowercase : List[Any] = file.split(""".""" )[0]
try:
lowercase : Dict = getattr(__magic_name__ , __magic_name__ )
lowercase : Dict = doctest.DocTestSuite(__magic_name__ )
lowercase : Optional[int] = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
lowercase : List[str] = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __snake_case ( self :Optional[Any] ) ->Dict:
lowercase : int = Path("""src/transformers""" )
lowercase : Tuple = """modeling"""
lowercase : List[str] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def __snake_case ( self :str ) ->str:
lowercase : Optional[int] = Path("""src/transformers""" )
lowercase : Tuple = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def __snake_case ( self :Optional[int] ) ->str:
lowercase : Tuple = Path("""src/transformers""" )
lowercase : List[Any] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def __snake_case ( self :Tuple ) ->Any:
lowercase : str = Path("""src/transformers""" )
lowercase : Optional[int] = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def __snake_case ( self :List[str] ) ->Tuple:
lowercase : List[str] = Path("""docs/source""" )
lowercase : int = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
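# --- Editor's illustration (added; not part of the original test file) ---------
# The analyzer above drives `doctest.DocTestSuite` (for importable modules) and
# `doctest.testfile` (for documentation files). A self-contained sketch of the
# module path; `math` carries no doctests, so since Python 3.5 the suite is
# simply empty and reports zero failures:
if __name__ == "__main__":
    import doctest
    import math
    import unittest

    suite = doctest.DocTestSuite(math)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    assert len(result.failures) == 0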
| 348 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {"vocab_file": "vocab.txt"}
__UpperCamelCase : Optional[Any] = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
__UpperCamelCase : Any = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def load_vocab_file(vocab_file):
    # Read one token per line, as referenced by the tokenizer below.
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class __lowerCAmelCase ( lowerCamelCase__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :str , __magic_name__ :List[Any] , __magic_name__ :Dict="<unk>" , __magic_name__ :str="<cls>" , __magic_name__ :int="<pad>" , __magic_name__ :str="<mask>" , __magic_name__ :Dict="<eos>" , **__magic_name__ :int , ):
'''simple docstring'''
super().__init__(**lowercase_ )
a = load_vocab_file(lowercase_ )
a = dict(enumerate(self.all_tokens ) )
a = {tok: ind for ind, tok in enumerate(self.all_tokens )}
a = unk_token
a = cls_token
a = pad_token
a = mask_token
a = eos_token
a = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Tuple ):
'''simple docstring'''
return self._id_to_token.get(lowercase_ , self.unk_token )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) )
def lowerCamelCase__ ( self :str , __magic_name__ :Optional[int] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
return text.split()
def lowerCamelCase__ ( self :Any , __magic_name__ :str=False ):
'''simple docstring'''
return len(self._id_to_token )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def lowerCamelCase__ ( self :int , __magic_name__ :Optional[Any] ):
'''simple docstring'''
return self._token_to_id.get(lowercase_ , self._token_to_id.get(self.unk_token ) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :int ):
'''simple docstring'''
return self._id_to_token.get(lowercase_ , self.unk_token )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Any , __magic_name__ :Union[str, Any] = None ):
'''simple docstring'''
a = [self.cls_token_id]
a = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :Dict , __magic_name__ :Tuple = None , __magic_name__ :Tuple = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
a = [1] + ([0] * len(lowercase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowercase_ ) + [1]
return mask
def lowerCamelCase__ ( self :Tuple , __magic_name__ :int , __magic_name__ :Any ):
'''simple docstring'''
a = os.path.join(lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
with open(lowercase_ , """w""" ) as f:
f.write("""\n""".join(self.all_tokens ) )
return (vocab_file,)
@property
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=lowercase_ )
def lowerCamelCase__ ( self :int , __magic_name__ :Dict , __magic_name__ :List[Any] = False ):
'''simple docstring'''
return super()._add_tokens(lowercase_ , special_tokens=lowercase_ )
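# --- Editor's illustration (added; not part of the original file) --------------
# The layout produced by `build_inputs_with_special_tokens` above, shown with
# placeholder ids: <cls> + sequence + <eos> for a single input, and
# <cls> + seq_a + <eos> + seq_b + <eos> for a pair (ESM has no separate sep token).
if __name__ == "__main__":
    cls_id, eos_id = 0, 2  # hypothetical ids
    seq_a, seq_b = [5, 6, 7], [8, 9]
    assert [cls_id] + seq_a + [eos_id] == [0, 5, 6, 7, 2]
    assert [cls_id] + seq_a + [eos_id] + seq_b + [eos_id] == [0, 5, 6, 7, 2, 8, 9, 2]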
| 468 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : Optional[int] = "PoolFormerConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "sail/poolformer_s12"
UpperCAmelCase__ : Any = [1, 5_12, 7, 7]
# Image classification docstring
UpperCAmelCase__ : List[str] = "sail/poolformer_s12"
UpperCAmelCase__ : Any = "tabby, tabby cat"
UpperCAmelCase__ : Tuple = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ = None) -> None:
super().__init__()
__snake_case = drop_prob
def _a ( self , lowercase_) -> torch.Tensor:
return drop_path(lowercase_ , self.drop_prob , self.training)
def _a ( self) -> str:
return "p={}".format(self.drop_prob)
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> str:
super().__init__()
__snake_case = patch_size if isinstance(lowercase_ , collections.abc.Iterable) else (patch_size, patch_size)
__snake_case = stride if isinstance(lowercase_ , collections.abc.Iterable) else (stride, stride)
__snake_case = padding if isinstance(lowercase_ , collections.abc.Iterable) else (padding, padding)
__snake_case = nn.Convad(lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=lowercase_)
__snake_case = norm_layer(lowercase_) if norm_layer else nn.Identity()
def _a ( self , lowercase_) -> int:
__snake_case = self.projection(lowercase_)
__snake_case = self.norm(lowercase_)
return embeddings
class __lowercase ( nn.GroupNorm ):
def __init__( self , lowercase_ , **lowercase_) -> Dict:
super().__init__(1 , lowercase_ , **lowercase_)
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Optional[int]:
super().__init__()
__snake_case = nn.AvgPoolad(lowercase_ , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase_)
def _a ( self , lowercase_) -> str:
return self.pool(lowercase_) - hidden_states
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
super().__init__()
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
__snake_case = PoolFormerDropPath(lowercase_)
if isinstance(config.hidden_act , lowercase_):
__snake_case = ACTaFN[config.hidden_act]
else:
__snake_case = config.hidden_act
def _a ( self , lowercase_) -> int:
__snake_case = self.conva(lowercase_)
__snake_case = self.act_fn(lowercase_)
__snake_case = self.drop(lowercase_)
__snake_case = self.conva(lowercase_)
__snake_case = self.drop(lowercase_)
return hidden_states
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
super().__init__()
__snake_case = PoolFormerPooling(lowercase_)
__snake_case = PoolFormerOutput(lowercase_ , lowercase_ , lowercase_ , lowercase_)
__snake_case = PoolFormerGroupNorm(lowercase_)
__snake_case = PoolFormerGroupNorm(lowercase_)
# Useful for training neural nets
__snake_case = PoolFormerDropPath(lowercase_) if drop_path > 0.0 else nn.Identity()
__snake_case = config.use_layer_scale
if config.use_layer_scale:
__snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_)) , requires_grad=lowercase_)
__snake_case = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_)) , requires_grad=lowercase_)
def _a ( self , lowercase_) -> Dict:
if self.use_layer_scale:
__snake_case = self.pooling(self.before_norm(lowercase_))
__snake_case = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__snake_case = hidden_states + self.drop_path(lowercase_)
__snake_case = ()
__snake_case = self.output(self.after_norm(lowercase_))
__snake_case = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__snake_case = hidden_states + self.drop_path(lowercase_)
__snake_case = (output,) + outputs
return outputs
else:
__snake_case = self.drop_path(self.pooling(self.before_norm(lowercase_)))
# First residual connection
__snake_case = pooling_output + hidden_states
__snake_case = ()
# Second residual connection inside the PoolFormerOutput block
__snake_case = self.drop_path(self.output(self.after_norm(lowercase_)))
__snake_case = hidden_states + layer_output
__snake_case = (output,) + outputs
return outputs
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Dict:
super().__init__()
__snake_case = config
# stochastic depth decay rule
__snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__snake_case = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__snake_case = nn.ModuleList(lowercase_)
# Transformer blocks
__snake_case = []
__snake_case = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__snake_case = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
lowercase_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(lowercase_))
__snake_case = nn.ModuleList(lowercase_)
def _a ( self , lowercase_ , lowercase_=False , lowercase_=True) -> List[str]:
__snake_case = () if output_hidden_states else None
__snake_case = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__snake_case , __snake_case = layers
# Get patch embeddings from hidden_states
__snake_case = embedding_layer(lowercase_)
# Send the embeddings through the blocks
for _, blk in enumerate(lowercase_):
__snake_case = blk(lowercase_)
__snake_case = layer_outputs[0]
if output_hidden_states:
__snake_case = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = PoolFormerConfig
__UpperCAmelCase = '''poolformer'''
__UpperCAmelCase = '''pixel_values'''
__UpperCAmelCase = True
def _a ( self , lowercase_) -> List[str]:
if isinstance(lowercase_ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase_ , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _a ( self , lowercase_ , lowercase_=False) -> int:
if isinstance(lowercase_ , lowercase_):
__snake_case = value
UpperCAmelCase__ : Optional[Any] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : List[str] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowerCamelCase__ , )
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> Optional[Any]:
super().__init__(lowercase_)
__snake_case = config
__snake_case = PoolFormerEncoder(lowercase_)
# Initialize weights and apply final processing
self.post_init()
def _a ( self) -> List[str]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
__snake_case = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
__snake_case = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> List[str]:
super().__init__()
__snake_case = nn.Linear(config.hidden_size , config.hidden_size)
def _a ( self , lowercase_) -> List[Any]:
__snake_case = self.dense(lowercase_)
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , lowerCamelCase__ , )
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> str:
super().__init__(lowercase_)
__snake_case = config.num_labels
__snake_case = PoolFormerModel(lowercase_)
# Final norm
__snake_case = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__snake_case = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.poolformer(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
__snake_case = outputs[0]
__snake_case = self.classifier(self.norm(lowercase_).mean([-2, -1]))
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze())
else:
__snake_case = loss_fct(lowercase_ , lowercase_)
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(lowercase_ , lowercase_)
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
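# --- Editor's illustration (added; not part of the original modeling file) -----
# A self-contained check of the stochastic-depth ("drop path") rule implemented
# at the top of this file: in training, each sample in the batch is zeroed with
# probability p and survivors are rescaled by 1 / (1 - p), so the expected
# value of the output matches the input.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    p = 0.25
    x = torch.ones(10_000, 4)
    keep = (torch.rand(x.shape[0], 1) < (1 - p)).float()  # per-sample Bernoulli mask
    out = x / (1 - p) * keep
    assert abs(out.mean().item() - 1.0) < 0.05  # expectation is preserved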
| 313 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase : Optional[Any] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase : Tuple = {
'''RUCAIBox/mvp''': 1024,
}
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["""input_ids""", """attention_mask"""]
__magic_name__ = MvpTokenizer
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="replace" , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ) -> List[Any]:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
_A : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase__ ) != add_prefix_space:
_A : Dict = getattr(UpperCAmelCase__ , pre_tok_state.pop('''type''' ) )
_A : List[Any] = add_prefix_space
_A : Tuple = pre_tok_class(**UpperCAmelCase__ )
_A : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_A : Any = '''post_processor'''
_A : Union[str, Any] = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
_A : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_A : int = tuple(state['''sep'''] )
if "cls" in state:
_A : Union[str, Any] = tuple(state['''cls'''] )
_A : int = False
if state.get('''add_prefix_space''' , UpperCAmelCase__ ) != add_prefix_space:
_A : Optional[int] = add_prefix_space
_A : Union[str, Any] = True
if state.get('''trim_offsets''' , UpperCAmelCase__ ) != trim_offsets:
_A : List[str] = trim_offsets
_A : int = True
if changes_to_apply:
_A : Optional[int] = getattr(UpperCAmelCase__ , state.pop('''type''' ) )
_A : str = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Tuple:
_A : Any = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
_A : Any = value
def _lowerCamelCase ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding:
_A : Optional[int] = kwargs.get('''is_split_into_words''' , UpperCAmelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowerCamelCase ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding:
_A : int = kwargs.get('''is_split_into_words''' , UpperCAmelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
_A : List[Any] = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Tuple:
_A : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
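# --- Editor's illustration (added; not part of the original file) --------------
# The pair layout produced by `build_inputs_with_special_tokens` above follows
# the BART/RoBERTa convention <s> A </s> </s> B </s>, and the token type ids
# are all zero. Shown with placeholder ids (bos=0, eos=2):
if __name__ == "__main__":
    bos, eos = 0, 2  # hypothetical ids
    a, b = [10, 11], [12]
    pair = [bos] + a + [eos] + [eos] + b + [eos]
    assert pair == [0, 10, 11, 2, 2, 12, 2]
    assert [0] * len(pair) == [0, 0, 0, 0, 0, 0, 0]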
| 417 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
__magic_name__ = 42
__magic_name__ = 42
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
super().__init__()
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self , UpperCAmelCase__ = 1 , UpperCAmelCase__ = 5_0 , UpperCAmelCase__ = None , UpperCAmelCase__ = "pil" , UpperCAmelCase__ = True , **UpperCAmelCase__ , ) -> Union[Tuple, ImagePipelineOutput]:
_A : List[Any] = self.unet.config.sample_size
_A : List[Any] = (batch_size, 3, img_size, img_size)
_A : Optional[int] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_A : str = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_A : Dict = self.scheduler.schedule[t]
_A : Optional[Any] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_A , _A : Tuple = self.scheduler.add_noise_to_input(UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : str = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_A : Tuple = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_A : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_A : List[str] = self.scheduler.step_correct(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , step_output.prev_sample , step_output['''derivative'''] , )
_A : Optional[int] = step_output.prev_sample
_A : List[str] = (sample / 2 + 0.5).clamp(0 , 1 )
_A : int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A : int = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
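# --- Editor's illustration (added; not part of the original pipeline file) -----
# The loop above is the stochastic sampler of Karras et al. (2022): an Euler
# step from sigma_hat to sigma_prev, followed (when sigma_prev != 0) by a
# second-order correction that re-evaluates the derivative at the predicted
# point. The same predictor-corrector (Heun) structure on the toy ODE dy/dt = -y:
if __name__ == "__main__":
    import math

    y, dt = 1.0, 0.1
    for _ in range(10):
        d1 = -y                        # derivative at the current point
        y_euler = y + dt * d1          # Euler (predictor) step
        d2 = -y_euler                  # derivative at the predicted point
        y = y + dt * 0.5 * (d1 + d2)   # second-order (Heun) correction
    assert abs(y - math.exp(-1.0)) < 1e-3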
| 417 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( A_ ,unittest.TestCase ):
A__ : str = DebertaTokenizer
A__ : Any = True
A__ : Dict = DebertaTokenizerFast
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
snake_case : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
snake_case : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case : int = {"unk_token": "[UNK]"}
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , **snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = "lower newer"
snake_case : Tuple = "lower newer"
return input_text, output_text
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : List[Any] = "lower newer"
snake_case : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case : Union[str, Any] = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
snake_case : Tuple = tokens + [tokenizer.unk_token]
snake_case : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.get_tokenizer()
snake_case : str = tokenizer("Hello" , "World" )
snake_case : List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
snake_case : List[str] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case : Tuple = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
snake_case : int = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
snake_case : Dict = tokenizer.encode(
"sequence builders" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
snake_case : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
snake_case : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
snake_case : Dict = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case : Union[str, Any] = tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case : int = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
snake_case : Tuple = tokenizer(snake_case__ , padding=snake_case__ )
snake_case : Any = [tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) for seq in encoding["input_ids"]]
# fmt: off
snake_case : Optional[int] = {
"input_ids": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case : List[Any] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , snake_case__ )
for expected, decoded in zip(snake_case__ , snake_case__ ):
self.assertEqual(snake_case__ , snake_case__ )
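# --- Editor's illustration (added; not part of the original test file) ---------
# The fixture above builds a toy GPT-2-style BPE vocabulary in which
# "lower newer" tokenizes to ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
# ("\u0120" marks a leading space). A plain-Python check of the id mapping the
# tests assert on:
if __name__ == "__main__":
    vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120",
             "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
             "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]"]
    token_to_id = {tok: i for i, tok in enumerate(vocab)}
    tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er", "[UNK]"]
    assert [token_to_id[t] for t in tokens] == [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]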
| 204 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
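# --- Editor's illustration (added; not part of the original __init__ file) -----
# The try/except blocks above implement a soft-dependency pattern: probe each
# backend and, on failure, import dummy objects that raise a helpful error only
# when used. A generic, self-contained sketch of the probe itself:
if __name__ == "__main__":
    import importlib.util

    def is_available(name: str) -> bool:
        # Mirrors the spirit of is_torch_available() / is_scipy_available() above.
        return importlib.util.find_spec(name) is not None

    print("scipy available:", is_available("scipy"))
    print("torchsde available:", is_available("torchsde"))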
| 204 | 1 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( __UpperCAmelCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__, '''embed_dim''' ) )
self.parent.assertTrue(hasattr(UpperCamelCase__, '''num_heads''' ) )
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=64, UpperCamelCase__=3, UpperCamelCase__=[16, 48, 96], UpperCamelCase__=[1, 3, 6], UpperCamelCase__=[1, 2, 10], UpperCamelCase__=[7, 3, 3], UpperCamelCase__=[4, 2, 2], UpperCamelCase__=[2, 1, 1], UpperCamelCase__=[2, 2, 2], UpperCamelCase__=[False, False, True], UpperCamelCase__=[0.0, 0.0, 0.0], UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=2, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_sizes
lowerCAmelCase_ = patch_stride
lowerCAmelCase_ = patch_padding
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = stride_kv
lowerCAmelCase_ = depth
lowerCAmelCase_ = cls_token
lowerCAmelCase_ = attention_drop_rate
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.num_labels )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = CvtModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
lowerCAmelCase_ = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_ = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowerCAmelCase_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowerCAmelCase_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
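    # Worked example of the convolutional patch-embedding size formula used above
    # (illustrative numbers taken from this tester's defaults, not from the
    # original file): with image_size=64, patch_sizes[0]=7, patch_stride[0]=4 and
    # patch_padding[0]=2, stage 0 produces floor((64 + 2*2 - 7) / 4) + 1 = 16
    # tokens per spatial side.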
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = CvtForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__, labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__snake_case = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = CvtModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = CvtModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase ( ):
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
| 706 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class A ( __UpperCAmelCase ):
__snake_case = 'roberta-prelayernorm'
def __init__( self, UpperCamelCase__=5_0265, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=512, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=2, UpperCamelCase__="absolute", UpperCamelCase__=True, UpperCamelCase__=None, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = classifier_dropout
class A ( __UpperCAmelCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
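# Illustrative usage sketch (assumption: mirrors the public transformers API;
# not part of the original module):
#
#   from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
#
#   config = RobertaPreLayerNormConfig(hidden_size=256, num_hidden_layers=4)
#   model = RobertaPreLayerNormModel(config)  # randomly initialised weights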
| 325 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = (DDPMParallelScheduler,)
def A ( self : Tuple , **a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**a_ )
return config
def A ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def A ( self : List[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def A ( self : Dict ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a_ )
def A ( self : List[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def A ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , )
def A ( self : Optional[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def A ( self : List[str] ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = len(a_ )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
__snake_case = self.dummy_sample_deter + 0.1
__snake_case = self.dummy_sample_deter - 0.1
__snake_case = samplea.shape[0]
__snake_case = torch.stack([samplea, samplea, samplea] , dim=0 )
__snake_case = torch.arange(a_ )[0:3, None].repeat(1 , a_ )
__snake_case = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__snake_case = scheduler.batch_step_no_noise(a_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__snake_case = torch.sum(torch.abs(a_ ) )
__snake_case = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = len(a_ )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
__snake_case = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
__snake_case = model(a_ , a_ )
# 2. predict previous mean of sample x_t-1
__snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(a_ ) )
__snake_case = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(prediction_type="v_prediction" )
__snake_case = scheduler_class(**a_ )
__snake_case = len(a_ )
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
__snake_case = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
__snake_case = model(a_ , a_ )
# 2. predict previous mean of sample x_t-1
__snake_case = scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
__snake_case = pred_prev_sample
__snake_case = torch.sum(torch.abs(a_ ) )
__snake_case = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a_ )
__snake_case = scheduler.timesteps
for i, timestep in enumerate(a_ ):
if i == len(a_ ) - 1:
__snake_case = -1
else:
__snake_case = timesteps[i + 1]
__snake_case = scheduler.previous_timestep(a_ )
__snake_case = prev_t.item()
self.assertEqual(a_ , a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [100, 87, 50, 51, 0]
with self.assertRaises(a_ , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [100, 87, 50, 1, 0]
__snake_case = len(a_ )
with self.assertRaises(a_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a_ )
__snake_case = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            a_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=a_ )
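    # Minimal sketch of the custom-timesteps API exercised by the tests above
    # (assumption: follows the public DDPMParallelScheduler interface; not part
    # of the original test file):
    #
    #   scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    #   scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # strictly descending
    #   print(scheduler.timesteps)  # tensor([100, 87, 50, 1, 0])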
| 69 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Any , a_ : Union[str, Any]=13 , a_ : Any=7 , a_ : Any=True , a_ : Dict=True , a_ : Union[str, Any]=False , a_ : Tuple=True , a_ : str=99 , a_ : Tuple=64 , a_ : Tuple=5 , a_ : Union[str, Any]=4 , a_ : Dict=64 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : List[str]=0.1 , a_ : Dict=512 , a_ : Tuple=16 , a_ : str=2 , a_ : Any=0.02 , a_ : List[Any]=3 , a_ : Tuple=4 , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def A ( self : int ):
"""simple docstring"""
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def A ( self : str ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[str] ):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Tuple , a_ : int , a_ : str , a_ : Optional[int] , a_ : List[Any] , a_ : str , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = MPNetModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Any , a_ : int , a_ : Tuple , a_ : str , a_ : int , a_ : str , a_ : List[Any] ):
"""simple docstring"""
__snake_case = MPNetForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(
a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Any , a_ : Any , a_ : int , a_ : Union[str, Any] , a_ : Dict , a_ : Optional[Any] , a_ : Any ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MPNetForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Optional[Any] , a_ : Any , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = self.num_choices
__snake_case = MPNetForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = model(
a_ , attention_mask=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Dict , a_ : List[str] , a_ : str , a_ : Union[str, Any] , a_ : str , a_ : Optional[int] , a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = MPNetForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) = config_and_inputs
__snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = MPNetModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )
def A ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a_ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = MPNetModel.from_pretrained("microsoft/mpnet-base" )
__snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__snake_case = model(a_ )[0]
__snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
__snake_case = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 69 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
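# Sanity check (illustrative, not part of the original script): the sort is
# in place and returns the comparison count.
#
#   demo = [9, 4, 7, 1]
#   _in_place_quick_sort(demo, 0, len(demo) - 1)
#   assert demo == [1, 4, 7, 9]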
| 703 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune branches that already overshoot, or that can no longer reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
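# Expected output for the inputs above (worked example): the subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9 are [3, 4, 2] and [4, 5], so this prints
# "[3, 4, 2] [4, 5]".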
| 416 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : List[str] = {'vocab_file': 'spiece.model'}
lowercase_ : Dict = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
lowercase_ : Tuple = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class _lowerCamelCase ( UpperCamelCase_ ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = None , **lowerCAmelCase , ) -> None:
SCREAMING_SNAKE_CASE__: Optional[Any]= {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b, if'''
                ''' you are testing the model, this can safely be ignored''' )
SCREAMING_SNAKE_CASE__: Dict= '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
SCREAMING_SNAKE_CASE__: List[Any]= '''<|endoftext|>''' if eos_token is None else eos_token
SCREAMING_SNAKE_CASE__: List[Any]= '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
SCREAMING_SNAKE_CASE__: List[str]= unk_token if pad_token is None else pad_token
SCREAMING_SNAKE_CASE__: Dict= eos_token if bos_token is None else bos_token
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= '''<pad>''' if pad_token is None else pad_token
SCREAMING_SNAKE_CASE__: List[Any]= '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= do_lower_case
SCREAMING_SNAKE_CASE__: int= remove_space
SCREAMING_SNAKE_CASE__: List[str]= keep_accents
SCREAMING_SNAKE_CASE__: int= vocab_file
SCREAMING_SNAKE_CASE__: int= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
# Used for whitespace normalization in input texts
        # fmt: off
SCREAMING_SNAKE_CASE__: Optional[int]= {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        SCREAMING_SNAKE_CASE__: str= re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
def __getstate__( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Any= self.__dict__.copy()
SCREAMING_SNAKE_CASE__: Dict= None
return state
def __setstate__( self , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= {}
SCREAMING_SNAKE_CASE__: Tuple= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCamelCase_ ( self ) -> int:
return len(self.sp_model )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Any= self.non_printing_characters_re.sub('''''' , lowerCAmelCase )
# Normalize whitespaces
SCREAMING_SNAKE_CASE__: Any= ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
SCREAMING_SNAKE_CASE__: Optional[Any]= unicodedata.normalize('''NFC''' , lowerCAmelCase )
return text
def UpperCamelCase_ ( self , lowerCAmelCase , **lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.preprocess_text(lowerCAmelCase )
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
return self.sp_model.PieceToId(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
return self.sp_model.IdToPiece(lowerCAmelCase )
@staticmethod
def UpperCamelCase_ ( lowerCAmelCase ) -> str:
return out_string
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Union[str, Any]= []
SCREAMING_SNAKE_CASE__: Optional[int]= ''''''
SCREAMING_SNAKE_CASE__: Any= False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
SCREAMING_SNAKE_CASE__: Union[str, Any]= True
SCREAMING_SNAKE_CASE__: Tuple= []
else:
current_sub_tokens.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string
def UpperCamelCase_ ( self ) -> Dict[str, int]:
SCREAMING_SNAKE_CASE__: List[Any]= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE__: Optional[int]= os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: List[Any]= self.preprocess_text(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.sp_model.encode(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Dict= [self.preprocess_text(lowerCAmelCase ) for t in text]
SCREAMING_SNAKE_CASE__: Optional[Any]= self.sp_model.encode(lowerCAmelCase )
if return_tensors is True or return_tensors == "pt":
SCREAMING_SNAKE_CASE__: Dict= torch.tensor(lowerCAmelCase )
return token_ids
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
return self.sp_model.decode(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[int]:
SCREAMING_SNAKE_CASE__: Tuple= [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
SCREAMING_SNAKE_CASE__: int= (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCAmelCase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=lowerCAmelCase )
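# Illustrative usage sketch (assumption: this class corresponds to
# transformers' GPTSw3Tokenizer and the obfuscated methods above are
# encode_fast/decode_fast; checkpoint name taken from the vocab map above):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Svenska är kul!", return_tensors="pt")
#   text = tokenizer.decode_fast(ids[0].tolist())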
| 64 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = "swin"
_UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,_A=224 ,_A=4 ,_A=3 ,_A=96 ,_A=[2, 2, 6, 2] ,_A=[3, 6, 12, 24] ,_A=7 ,_A=4.0 ,_A=True ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A="gelu" ,_A=False ,_A=0.0_2 ,_A=1E-5 ,_A=32 ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Union[str, Any] = embed_dim
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : Any = len(_A )
_lowerCAmelCase : Optional[Any] = num_heads
_lowerCAmelCase : List[Any] = window_size
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : Dict = qkv_bias
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Tuple = use_absolute_embeddings
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : List[Any] = int(embed_dim * 2 ** (len(_A ) - 1) )
_lowerCAmelCase : List[str] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(_A ) + 1 )]
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=_A ,out_indices=_A ,stage_names=self.stage_names )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
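# Illustrative usage sketch (assumption: mirrors the public transformers API;
# not part of the original module):
#
#   from transformers import SwinConfig, SwinModel
#
#   config = SwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   model = SwinModel(config)  # hidden_size becomes embed_dim * 2 ** (len(depths) - 1) = 768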
| 259 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :Optional[int] = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[Any] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__snake_case :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
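# Illustrative effect of the lazy-module pattern above (assumption; not part
# of the original file): importing the package stays cheap, and the heavy
# modeling code is only loaded on first attribute access, e.g.
#
#   from transformers import UperNetForSemanticSegmentation  # triggers the real import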
| 60 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
__snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__snake_case :Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 60 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
def A__ ( self :Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def A__ ( self :Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 21 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: return (lower, upper) with lower @ upper == table."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
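# Worked example (illustrative): for table = np.array([[2.0, 1.0], [4.0, 3.0]])
# the Doolittle factorisation above returns
#   lower = [[1, 0], [2, 1]] and upper = [[2, 1], [0, 1]],
# and lower @ upper reproduces table.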
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 | 0 |
"""simple docstring"""
def upper(word: str) -> str:
    '''Convert every lowercase ASCII letter in `word` to uppercase.'''
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 468 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class A_ :
UpperCAmelCase__ = LEDConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any]=1_3 , __lowerCamelCase : str=7 , __lowerCamelCase : Any=True , __lowerCamelCase : int=False , __lowerCamelCase : List[Any]=9_9 , __lowerCamelCase : Any=3_2 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=4 , __lowerCamelCase : int=3_7 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=2_0 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : str=1 , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Dict=4 , ) -> Optional[Any]:
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = eos_token_id
__magic_name__ = pad_token_id
__magic_name__ = bos_token_id
__magic_name__ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__magic_name__ = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__magic_name__ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _snake_case ( self : Dict ) -> Optional[Any]:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__magic_name__ = prepare_led_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = tf.concat(
[tf.zeros_like(__lowerCamelCase )[:, :-1], tf.ones_like(__lowerCamelCase )[:, -1:]] , axis=-1 , )
__magic_name__ = global_attention_mask
return config, inputs_dict
def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any ) -> Union[str, Any]:
__magic_name__ = TFLEDModel(config=__lowerCamelCase ).get_decoder()
__magic_name__ = inputs_dict["input_ids"]
__magic_name__ = input_ids[:1, :]
__magic_name__ = inputs_dict["attention_mask"][:1, :]
__magic_name__ = 1
# first forward pass
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
__magic_name__ , __magic_name__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__magic_name__ = tf.concat([input_ids, next_tokens] , axis=-1 )
__magic_name__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
__magic_name__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__magic_name__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx]
__magic_name__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
def _lowerCAmelCase ( __lowerCamelCase:str , __lowerCamelCase:str , __lowerCamelCase:List[Any] , __lowerCamelCase:Any=None , __lowerCamelCase:Dict=None , __lowerCamelCase:List[Any]=None , __lowerCamelCase:Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
__magic_name__ = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__magic_name__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__magic_name__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class A_ ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCAmelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCAmelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _snake_case ( self : int ) -> Optional[int]:
__magic_name__ = TFLEDModelTester(self )
__magic_name__ = ConfigTester(self , config_class=__lowerCamelCase )
def _snake_case ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) -> List[Any]:
__magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
def _snake_case ( self : List[str] ) -> str:
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = tf.zeros_like(inputs_dict["attention_mask"] )
__magic_name__ = 2
__magic_name__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
__magic_name__ = True
__magic_name__ = self.model_tester.seq_length
__magic_name__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__lowerCamelCase : int ):
__magic_name__ = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__lowerCamelCase : Any ):
__magic_name__ = [t.numpy() for t in outputs.encoder_attentions]
__magic_name__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__magic_name__ = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ = True
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
__magic_name__ = True
__magic_name__ = True
__magic_name__ = model_class(__lowerCamelCase )
__magic_name__ = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self : Union[str, Any] ) -> List[str]:
pass
def _snake_case ( self : int ) -> str:
        # TODO: Head-masking not yet implemented
pass
def _long_tensor(tok_lst):
    """Build an integer tensor of token ids (tf.int32 restored from the garbled `tf.intaa`)."""
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class A_ ( unittest.TestCase ):
def _snake_case ( self : Optional[Any] ) -> List[str]:
__magic_name__ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
__magic_name__ = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__magic_name__ = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__magic_name__ = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = model(**__lowerCamelCase )[0]
__magic_name__ = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , __lowerCamelCase )
# change to expected output here
__magic_name__ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 )
def _snake_case ( self : Any ) -> Dict:
__magic_name__ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
__magic_name__ = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__magic_name__ = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
__magic_name__ = prepare_led_inputs_dict(model.config , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = model(**__lowerCamelCase )[0]
__magic_name__ = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , __lowerCamelCase )
# change to expected output here
__magic_name__ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-3 , rtol=1e-3 )
| 468 | 1 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a perfect-information game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
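
# Worked example (a sketch added for illustration): with four leaves
# [3, 5, 2, 9] and height log2(4) = 2, the maximiser picks the better of the
# two minimiser outcomes, min(3, 5) = 3 and min(2, 9) = 2, so the value is 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3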
| 47 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 376 | 0 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
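

if __name__ == "__main__":
    # Demo (illustrative, not part of the original module): read one key in a
    # real terminal; raw-mode reads do not work in most IDE consoles.
    key = get_character()
    if isinstance(key, str) and len(key) == 1 and ord(key) >= KEYMAP["arrow_begin"]:
        print("arrow key code:", ord(key) - ARROW_KEY_FLAG)
    else:
        print("key:", key)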
| 701 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
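

# Usage sketch (illustrative): the attribute map above lets generic code read
# `config.num_attention_heads` while the stored field is
# `encoder_attention_heads`, and two-stage mode requires box refinement.
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == config.encoder_attention_heads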
| 265 | 0 |
"""simple docstring"""
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
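

if __name__ == "__main__":
    # Example (a sketch added for illustration): two diagonal clusters of 1s
    # form two islands under 8-directional connectivity.
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
    ]
    print(Matrix(4, 4, grid).count_islands())  # expected: 2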
| 88 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 74 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # Map from [-1, 1] to [0, 1], move channels last, and hand off to numpy_to_pil.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
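

if __name__ == "__main__":
    # Demo (illustrative): a batch of two random 8x8 RGB float arrays in
    # [0, 1] becomes a list of PIL images via numpy_to_pil.
    import numpy as np

    batch = np.random.rand(2, 8, 8, 3)
    pil_batch = numpy_to_pil(batch)
    print(len(pil_batch), pil_batch[0].size)  # 2 (8, 8)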
| 700 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right corner, where 1 marks a wall."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
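

if __name__ == "__main__":
    # Example (a sketch added for illustration): with the centre cell blocked,
    # the open cells form a ring, so exactly two simple paths lead from
    # (0, 0) to (2, 2).
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # expected: 2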
| 230 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
"""simple docstring"""
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class SageMakerConversionTest(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the scheduler's predictor step."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding SDE scheduler (predictor-corrector sampling)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggests replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
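

# Usage sketch (illustrative; mirrors the predictor-corrector loop used with
# diffusers' ScoreSdeVeScheduler — `model` stands for any score network):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   for t in scheduler.timesteps:
#       score = model(sample, t).sample
#       sample = scheduler.step_correct(score, sample).prev_sample   # corrector
#       score = model(sample, t).sample
#       sample = scheduler.step_pred(score, t, sample).prev_sample   # predictor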
| 369 | 0 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected" | 386 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into contiguous 1-indexed byte ranges, one per partition."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
import doctest
doctest.testmod() | 386 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ : str = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ : Dict = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ : Any = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase_ : str = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase_ : str = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
lowerCAmelCase_ : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase_ : Dict = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRQuestionEncoderTokenizer
lowerCAmelCase_ : List[Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCAmelCase_ : List[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCAmelCase_ : Dict = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __call__( self : int , lowercase__ : Union[str, Any] , lowercase__ : Optional[str] = None , lowercase__ : Optional[str] = None , lowercase__ : Union[bool, str] = False , lowercase__ : Union[bool, str] = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[bool] = None , **lowercase__ : List[Any] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
elif titles is None or texts is None:
_UpperCamelCase : Union[str, Any] = titles if texts is None else texts
return super().__call__(
lowercase__ , lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
_UpperCamelCase : List[Any] = titles if not isinstance(lowercase__ , lowercase__ ) else [titles]
_UpperCamelCase : Any = texts if not isinstance(lowercase__ , lowercase__ ) else [texts]
_UpperCamelCase : List[Any] = len(lowercase__ )
_UpperCamelCase : Dict = questions if not isinstance(lowercase__ , lowercase__ ) else [questions] * n_passages
assert len(lowercase__ ) == len(
            lowercase__ ), f'''There should be as many titles as texts but got {len(lowercase__ )} titles and {len(lowercase__ )} texts.'''
_UpperCamelCase : int = super().__call__(lowercase__ , lowercase__ , padding=lowercase__ , truncation=lowercase__ )["input_ids"]
_UpperCamelCase : Any = super().__call__(lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ )["input_ids"]
_UpperCamelCase : int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase__ , lowercase__ )
]
}
if return_attention_mask is not False:
_UpperCamelCase : str = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCamelCase : str = attention_mask
return self.pad(lowercase__ , padding=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__ )
def snake_case__ ( self : Tuple , lowercase__ : BatchEncoding , lowercase__ : DPRReaderOutput , lowercase__ : int = 16 , lowercase__ : int = 64 , lowercase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = reader_input["input_ids"]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : str = reader_output[:3]
_UpperCamelCase : Optional[int] = len(lowercase__ )
_UpperCamelCase : List[Any] = sorted(range(lowercase__ ) , reverse=lowercase__ , key=relevance_logits.__getitem__ )
_UpperCamelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_UpperCamelCase : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCamelCase : List[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCamelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_UpperCamelCase : List[Any] = len(lowercase__ )
_UpperCamelCase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase__ , top_spans=lowercase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase__ , start_index=lowercase__ , end_index=lowercase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def snake_case__ ( self : List[str] , lowercase__ : List[int] , lowercase__ : List[int] , lowercase__ : int , lowercase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
_UpperCamelCase : List[str] = []
for start_index, start_score in enumerate(lowercase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_UpperCamelCase : List[Any] = sorted(lowercase__ , key=lambda lowercase__ : x[1] , reverse=lowercase__ )
_UpperCamelCase : int = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
_UpperCamelCase : str = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = DPRReaderTokenizer
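

# Usage sketch (illustrative; mirrors transformers' DPRReaderTokenizerFast —
# the checkpoint name is an example): one question is broadcast over several
# passages, each row encoded as [CLS] question [SEP] title [SEP] text.
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=("Haddaway", "Wikipedia"),
#       texts=("'What Is Love' is a 1993 song...", "Love is an emotion..."),
#       padding=True,
#       return_tensors="pt",
#   )
#   # encoded["input_ids"].shape == (n_passages, sequence_length)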
| 435 | '''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule using a single rolling row."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
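
# Cross-check (a sketch added for illustration): Python's built-in math.comb
# computes the same binomial coefficient directly.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252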
| 435 | 1 |
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
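
# Equivalent check (a sketch added for illustration): modular exponentiation
# keeps only the last ten digits and never materialises the ~3000-digit sum.
MOD = 10**10
assert sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD == int(solution())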
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : List[str] = '''openai-gpt'''
_UpperCamelCase : List[str] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self: int , _SCREAMING_SNAKE_CASE: int=40478 , _SCREAMING_SNAKE_CASE: Optional[Any]=512 , _SCREAMING_SNAKE_CASE: List[Any]=768 , _SCREAMING_SNAKE_CASE: str=12 , _SCREAMING_SNAKE_CASE: Any=12 , _SCREAMING_SNAKE_CASE: Optional[int]="gelu" , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=0.1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE: Any=1e-5 , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: str="cls_index" , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: List[Any]=0.1 , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> int:
"""simple docstring"""
UpperCamelCase_ = vocab_size
UpperCamelCase_ = n_positions
UpperCamelCase_ = n_embd
UpperCamelCase_ = n_layer
UpperCamelCase_ = n_head
UpperCamelCase_ = afn
UpperCamelCase_ = resid_pdrop
UpperCamelCase_ = embd_pdrop
UpperCamelCase_ = attn_pdrop
UpperCamelCase_ = layer_norm_epsilon
UpperCamelCase_ = initializer_range
UpperCamelCase_ = summary_type
UpperCamelCase_ = summary_use_proj
UpperCamelCase_ = summary_activation
UpperCamelCase_ = summary_first_dropout
UpperCamelCase_ = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
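

# Usage sketch (illustrative; this mirrors transformers' OpenAIGPTConfig):
# the attribute map above exposes `hidden_size` as an alias for `n_embd`.
#
#   config = OpenAIGPTConfig(n_embd=768, n_layer=12, n_head=12)
#   assert config.hidden_size == config.n_embd == 768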
| 371 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by divisor (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Return the least odd value of n, coprime to 10, for which A(n) first exceeds limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
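
if __name__ == "__main__":
    # Sanity check (a sketch added for illustration): 111111 = 7 * 15873, so
    # the least repunit divisible by 7 has six digits.
    assert least_divisible_repunit(7) == 6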
| 259 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class A ( lowerCamelCase_ , lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_SCREAMING_SNAKE_CASE : Any = 2
@register_to_config
def __init__( self : Tuple , __UpperCAmelCase : int = 1000 , __UpperCAmelCase : float = 0.00_085 , __UpperCAmelCase : float = 0.012 , __UpperCAmelCase : str = "linear" , __UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : Optional[bool] = False , __UpperCAmelCase : Optional[bool] = False , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : str = "linspace" , __UpperCAmelCase : int = 0 , ) -> Any:
"""simple docstring"""
if trained_betas is not None:
UpperCamelCase_ = torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCamelCase_ = torch.linspace(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase_ = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
UpperCamelCase_ = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
UpperCamelCase_ = 1.0 - self.betas
UpperCamelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = use_karras_sigmas
def lowercase__ ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase_ = self.timesteps
UpperCamelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase_ = 1 if len(__UpperCAmelCase ) > 1 else 0
else:
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
UpperCamelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = self.index_for_timestep(__UpperCAmelCase )
UpperCamelCase_ = self.sigmas[step_index]
UpperCamelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None , __UpperCAmelCase : Optional[int] = None , ) -> str:
"""simple docstring"""
UpperCamelCase_ = num_inference_steps
UpperCamelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase_ = np.linspace(0 , num_train_timesteps - 1 , __UpperCAmelCase , dtype=__UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(__UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(__UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
UpperCamelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase_ = np.log(__UpperCAmelCase )
UpperCamelCase_ = np.interp(__UpperCAmelCase , np.arange(0 , len(__UpperCAmelCase ) ) , __UpperCAmelCase )
if self.config.use_karras_sigmas:
UpperCamelCase_ = self._convert_to_karras(in_sigmas=__UpperCAmelCase , num_inference_steps=self.num_inference_steps )
UpperCamelCase_ = np.array([self._sigma_to_t(__UpperCAmelCase , __UpperCAmelCase ) for sigma in sigmas] )
        UpperCamelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCamelCase_ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase )
UpperCamelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase_ = torch.from_numpy(__UpperCAmelCase )
UpperCamelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__UpperCAmelCase ).startswith('mps' ):
# mps does not support float64
            UpperCamelCase_ = timesteps.to(__UpperCAmelCase , dtype=torch.float32 )
else:
UpperCamelCase_ = timesteps.to(device=__UpperCAmelCase )
# empty dt and derivative
UpperCamelCase_ = None
UpperCamelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase_ = defaultdict(__UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = np.log(__UpperCAmelCase )
# get distribution
UpperCamelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCamelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCamelCase_ = low_idx + 1
UpperCamelCase_ = log_sigmas[low_idx]
UpperCamelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase_ = (low - log_sigma) / (low - high)
UpperCamelCase_ = np.clip(__UpperCAmelCase , 0 , 1 )
# transform interpolation to time range
UpperCamelCase_ = (1 - w) * low_idx + w * high_idx
UpperCamelCase_ = t.reshape(sigma.shape )
return t
def lowercase__ ( self : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : str ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = in_sigmas[-1].item()
UpperCamelCase_ = in_sigmas[0].item()
UpperCamelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCamelCase_ = np.linspace(0 , 1 , __UpperCAmelCase )
UpperCamelCase_ = sigma_min ** (1 / rho)
UpperCamelCase_ = sigma_max ** (1 / rho)
UpperCamelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
return self.dt is None
def lowercase__ ( self : List[str] , __UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , __UpperCAmelCase : Union[float, torch.FloatTensor] , __UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , __UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase_ = self.index_for_timestep(__UpperCAmelCase )
# advance index counter by 1
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase_ = self.sigmas[step_index]
UpperCamelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCamelCase_ = self.sigmas[step_index - 1]
UpperCamelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase_ = 0
UpperCamelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCamelCase_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
UpperCamelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCamelCase_ = derivative
UpperCamelCase_ = dt
UpperCamelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCamelCase_ = (sample - pred_original_sample) / sigma_next
UpperCamelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCamelCase_ = self.dt
UpperCamelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCAmelCase )
def lowercase__ ( self : Any , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCAmelCase ):
# mps does not support float64
            UpperCamelCase_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            UpperCamelCase_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
UpperCamelCase_ = self.timesteps.to(original_samples.device )
UpperCamelCase_ = timesteps.to(original_samples.device )
UpperCamelCase_ = [self.index_for_timestep(__UpperCAmelCase , __UpperCAmelCase ) for t in timesteps]
UpperCamelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase_ = sigma.unsqueeze(-1 )
UpperCamelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.config.num_train_timesteps
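# The class above exposes the usual diffusers scheduler protocol
# (set_timesteps / scale_model_input / step). A minimal, hedged driver
# sketch follows; `scheduler` stands for an instance of the class above
# and `model` is a placeholder denoiser, not anything defined in this file.
def run_denoising_loop(scheduler, model, sample, num_inference_steps=30):
    scheduler.set_timesteps(num_inference_steps, device=sample.device)
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)      # sigma-scaling
        noise_pred = model(scaled, t)                        # epsilon prediction
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample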
| 559 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __snake_case , __snake_case=0.9_99 , __snake_case="cosine" , ) -> Dict:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
UpperCamelCase_ = []
for i in range(__snake_case ):
UpperCamelCase_ = i / num_diffusion_timesteps
UpperCamelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
    return torch.tensor(__snake_case , dtype=torch.float32 )
class A ( lowerCamelCase_ , lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_SCREAMING_SNAKE_CASE : Any = 2
@register_to_config
def __init__( self : Tuple , __UpperCAmelCase : int = 1000 , __UpperCAmelCase : float = 0.00_085 , __UpperCAmelCase : float = 0.012 , __UpperCAmelCase : str = "linear" , __UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCAmelCase : str = "epsilon" , __UpperCAmelCase : Optional[bool] = False , __UpperCAmelCase : Optional[bool] = False , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : str = "linspace" , __UpperCAmelCase : int = 0 , ) -> Any:
"""simple docstring"""
if trained_betas is not None:
            UpperCamelCase_ = torch.tensor(__UpperCAmelCase , dtype=torch.float32 )
        elif beta_schedule == "linear":
            UpperCamelCase_ = torch.linspace(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase_ = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCAmelCase , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase_ = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
UpperCamelCase_ = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
UpperCamelCase_ = 1.0 - self.betas
UpperCamelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = use_karras_sigmas
def lowercase__ ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase_ = self.timesteps
UpperCamelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase_ = 1 if len(__UpperCAmelCase ) > 1 else 0
else:
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
UpperCamelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = self.index_for_timestep(__UpperCAmelCase )
UpperCamelCase_ = self.sigmas[step_index]
UpperCamelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, torch.device] = None , __UpperCAmelCase : Optional[int] = None , ) -> str:
"""simple docstring"""
UpperCamelCase_ = num_inference_steps
UpperCamelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase_ = np.linspace(0 , num_train_timesteps - 1 , __UpperCAmelCase , dtype=__UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase_ = (np.arange(__UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(__UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
UpperCamelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase_ = np.log(__UpperCAmelCase )
UpperCamelCase_ = np.interp(__UpperCAmelCase , np.arange(0 , len(__UpperCAmelCase ) ) , __UpperCAmelCase )
if self.config.use_karras_sigmas:
UpperCamelCase_ = self._convert_to_karras(in_sigmas=__UpperCAmelCase , num_inference_steps=self.num_inference_steps )
UpperCamelCase_ = np.array([self._sigma_to_t(__UpperCAmelCase , __UpperCAmelCase ) for sigma in sigmas] )
        UpperCamelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCamelCase_ = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase )
UpperCamelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase_ = torch.from_numpy(__UpperCAmelCase )
UpperCamelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__UpperCAmelCase ).startswith('mps' ):
# mps does not support float64
            UpperCamelCase_ = timesteps.to(__UpperCAmelCase , dtype=torch.float32 )
else:
UpperCamelCase_ = timesteps.to(device=__UpperCAmelCase )
# empty dt and derivative
UpperCamelCase_ = None
UpperCamelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase_ = defaultdict(__UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = np.log(__UpperCAmelCase )
# get distribution
UpperCamelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCamelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCamelCase_ = low_idx + 1
UpperCamelCase_ = log_sigmas[low_idx]
UpperCamelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase_ = (low - log_sigma) / (low - high)
UpperCamelCase_ = np.clip(__UpperCAmelCase , 0 , 1 )
# transform interpolation to time range
UpperCamelCase_ = (1 - w) * low_idx + w * high_idx
UpperCamelCase_ = t.reshape(sigma.shape )
return t
def lowercase__ ( self : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : str ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = in_sigmas[-1].item()
UpperCamelCase_ = in_sigmas[0].item()
UpperCamelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCamelCase_ = np.linspace(0 , 1 , __UpperCAmelCase )
UpperCamelCase_ = sigma_min ** (1 / rho)
UpperCamelCase_ = sigma_max ** (1 / rho)
UpperCamelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
return self.dt is None
def lowercase__ ( self : List[str] , __UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , __UpperCAmelCase : Union[float, torch.FloatTensor] , __UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , __UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase_ = self.index_for_timestep(__UpperCAmelCase )
# advance index counter by 1
UpperCamelCase_ = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase_ = self.sigmas[step_index]
UpperCamelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCamelCase_ = self.sigmas[step_index - 1]
UpperCamelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase_ = 0
UpperCamelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCamelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCamelCase_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
UpperCamelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCamelCase_ = derivative
UpperCamelCase_ = dt
UpperCamelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCamelCase_ = (sample - pred_original_sample) / sigma_next
UpperCamelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCamelCase_ = self.dt
UpperCamelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCAmelCase )
def lowercase__ ( self : Any , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCAmelCase ):
# mps does not support float64
            UpperCamelCase_ = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            UpperCamelCase_ = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
UpperCamelCase_ = self.timesteps.to(original_samples.device )
UpperCamelCase_ = timesteps.to(original_samples.device )
UpperCamelCase_ = [self.index_for_timestep(__UpperCAmelCase , __UpperCAmelCase ) for t in timesteps]
UpperCamelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase_ = sigma.unsqueeze(-1 )
UpperCamelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.config.num_train_timesteps
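# For reference, a self-contained sketch of the "squaredcos_cap_v2"
# (Glide cosine) schedule that betas_for_alpha_bar computes above with
# alpha_transform_type="cosine". This is my own restatement, not code
# taken from the snippet.
import math

import torch


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]
    return torch.tensor(betas, dtype=torch.float32)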
| 559 | 1 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


__magic_name__ : str = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
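# Hedged usage sketch: instantiating the deprecated alias emits the warning
# and otherwise behaves exactly like DPTImageProcessor. Default kwargs are
# assumed to be sufficient here.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = DPTFeatureExtractor()
    assert any("deprecated" in str(w.message) for w in caught)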
| 664 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
A , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
__magic_name__ = self.get_masked_index(_lowerCamelCase )
__magic_name__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCamelCase )
def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]:
if return_tensors is None:
__magic_name__ = self.framework
__magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase )
self.ensure_exactly_one_mask_token(_lowerCamelCase )
return model_inputs
def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
__magic_name__ = self.model(**_lowerCamelCase )
__magic_name__ = model_inputs["input_ids"]
return model_outputs
def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs["input_ids"][0]
__magic_name__ = model_outputs["logits"]
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )
__magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowerCamelCase )
result.append(_lowerCamelCase )
if single_mask:
return result[0]
return result
def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
if id_ is None:
__magic_name__ = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
if len(_lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
__magic_name__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(_lowerCamelCase ) )
if len(_lowerCamelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__magic_name__ = np.array(_lowerCamelCase )
return target_ids
def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
__magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
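# Hedged usage sketch of the fill-mask pipeline implemented above; the
# checkpoint name is illustrative and downloading it needs network access.
from transformers import pipeline

fill = pipeline("fill-mask", model="distilroberta-base")
for pred in fill("The capital of France is <mask>.", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))
# `targets` restricts scoring to the given strings (see get_target_ids);
# RoBERTa vocabulary entries carry a leading space.
print(fill("The capital of France is <mask>.", targets=[" Paris"]))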
| 664 | 1 |
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return the peak of `lst`, assuming it strictly rises then strictly falls."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
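# Illustrative sanity checks for peak(); inputs are my own and satisfy the
# rising-then-falling precondition the divide-and-conquer step relies on.
assert peak([1, 3, 4, 5, 4, 3]) == 5    # peak inside the middle window
assert peak([1, 10, 9, 8, 7]) == 10     # peak near the front of the list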
| 597 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase = 25_00_04
__UpperCAmelCase = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
    UpperCAmelCase_ =MBart50Tokenizer
    UpperCAmelCase_ =MBart50TokenizerFast
UpperCAmelCase_ =True
UpperCAmelCase_ =True
def _UpperCamelCase ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
        SCREAMING_SNAKE_CASE_ = MBart50Tokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = '''<s>'''
SCREAMING_SNAKE_CASE_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 1054 )
def _UpperCamelCase ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = MBart50Tokenizer(_A , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_A )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
# fmt: off
SCREAMING_SNAKE_CASE_ = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def _UpperCamelCase ( self ) -> str:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it save with the same files
self.assertSequenceEqual(_A , _A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = tokenizer_r.save_pretrained(_A , legacy_format=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.save_pretrained(_A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_ = tokenizer_r.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.from_pretrained(_A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_A , _A ) )
shutil.rmtree(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ ="facebook/mbart-large-50-one-to-many-mmt"
UpperCAmelCase_ =[
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
UpperCAmelCase_ =[
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
UpperCAmelCase_ =[EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def _UpperCamelCase ( cls ) -> Any:
        SCREAMING_SNAKE_CASE_ = MBart50Tokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE_ = 1
return cls
def _UpperCamelCase ( self ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _A )
def _UpperCamelCase ( self ) -> Optional[int]:
self.assertIn(_A , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_A , skip_special_tokens=_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
self.assertEqual(_A , _A )
self.assertNotIn(self.tokenizer.eos_token , _A )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _A )
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
self.assertEqual(ids[0] , _A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_A ) , _A )
def _UpperCamelCase ( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_A )
        SCREAMING_SNAKE_CASE_ = MBart50Tokenizer.from_pretrained(_A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )
@require_torch
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text_target=self.tgt_text , padding=_A , truncation=_A , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = targets['''input_ids''']
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_A ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
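# Hedged sketch of the src/tgt-language workflow these tests exercise.
# MBart50Tokenizer is the real transformers class behind the names above;
# downloading the checkpoint requires network access.
from transformers import MBart50Tokenizer

tok = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Source ids are prefixed with the en_XX language code and end with </s> (id 2).
assert batch.input_ids[0, 0].item() == tok.lang_code_to_id["en_XX"]
assert batch.input_ids[0, -1].item() == tok.eos_token_id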
| 597 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
A = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Union[str, Any] , **snake_case__ : int ) -> Any:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase = deprecated_arg[3:]
setattr(self , snake_case__ , not kwargs.pop(snake_case__ ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
_lowerCamelCase = kwargs.pop('torchscript' , self.torchscript )
_lowerCamelCase = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
_lowerCamelCase = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**snake_case__ )
lowerCAmelCase_ = field(default=__snake_case ,metadata={'help': 'Trace the models using torchscript'} )
lowerCAmelCase_ = field(default=__snake_case ,metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowerCAmelCase_ = field(
default='O1' ,metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} ,)
@cached_property
def _snake_case ( self : Optional[int] ) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
_lowerCamelCase = torch.device('cpu' )
_lowerCamelCase = 0
elif is_torch_tpu_available():
_lowerCamelCase = xm.xla_device()
_lowerCamelCase = 0
else:
_lowerCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowerCamelCase = torch.cuda.device_count()
return device, n_gpu
@property
def _snake_case ( self : Optional[int] ) -> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def _snake_case ( self : Optional[int] ) -> int:
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _snake_case ( self : Optional[int] ) -> "torch.device":
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def _snake_case ( self : Optional[Any] ) -> str:
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def _snake_case ( self : int ) -> List[str]:
        return self.n_gpu > 0
| 544 |
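# Standalone restatement of the _setup_devices logic from the
# benchmark-arguments snippet above the marker (TPU branch omitted);
# a hedged sketch, not the exact upstream implementation.
from typing import Tuple

import torch


def pick_device(use_cuda: bool = True) -> Tuple[torch.device, int]:
    if use_cuda and torch.cuda.is_available():
        return torch.device("cuda"), torch.cuda.device_count()
    return torch.device("cpu"), 0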
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( _A = "AAPL" ) -> str:
lowercase : Optional[Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
lowercase : str = BeautifulSoup(requests.get(_A ).text , """html.parser""" )
lowercase : int = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 264 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :List[str] = ""
__lowercase :List[str] = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(self , **UpperCamelCase__ )
lowerCamelCase_ = repo_info
lowerCamelCase_ = token
lowerCamelCase_ = None
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
if self.dir_cache is None:
lowerCamelCase_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase_ = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(UpperCamelCase__ ): {'''name''': str(UpperCamelCase__ ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = "rb" , **UpperCamelCase__ , ) -> str:
'''simple docstring'''
if not isinstance(self.repo_info , UpperCamelCase__ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowerCamelCase_ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def _lowerCAmelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
self._get_dirs()
lowerCamelCase_ = self._strip_protocol(UpperCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=False , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
self._get_dirs()
lowerCamelCase_ = PurePosixPath(path.strip('''/''' ) )
lowerCamelCase_ = {}
for p, f in self.dir_cache.items():
lowerCamelCase_ = PurePosixPath(p.strip('''/''' ) )
lowerCamelCase_ = p.parent
if root == path:
lowerCamelCase_ = f
lowerCamelCase_ = list(paths.values() )
if detail:
return out
else:
            return sorted(f['''name'''] for f in out )
| 66 |
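# Hedged usage sketch of the legacy Hub filesystem above the marker. The
# un-mangled constructor signature (repo_info, token) is assumed from the
# attribute names set in __init__, and fetching a real DatasetInfo needs
# network access.
from huggingface_hub import HfApi

info = HfApi().dataset_info("glue")
fs = lowerCAmelCase(repo_info=info)       # the (obfuscated) class defined above
print(fs.ls("", detail=False))            # names cached from repo_info.siblings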
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__lowercase : int = logging.get_logger(__name__)
__lowercase : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
__lowercase : Optional[int] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
__lowercase : Dict = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Dict = VOCAB_FILES_NAMES
__lowercase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase :Optional[int] = ["input_ids", "attention_mask"]
__lowercase :Any = BartTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase_ = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**UpperCamelCase__ )
lowerCamelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase_ = '''post_processor'''
lowerCamelCase_ = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
if tokenizer_component_instance:
lowerCamelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase_ = tuple(state['''sep'''] )
if "cls" in state:
lowerCamelCase_ = tuple(state['''cls'''] )
lowerCamelCase_ = False
if state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = True
if state.get('''trim_offsets''' , UpperCamelCase__ ) != trim_offsets:
lowerCamelCase_ = trim_offsets
lowerCamelCase_ = True
if changes_to_apply:
lowerCamelCase_ = getattr(UpperCamelCase__ , state.pop('''type''' ) )
lowerCamelCase_ = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
@property
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
lowerCamelCase_ = value
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowerCAmelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> BatchEncoding:
'''simple docstring'''
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCamelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 66 | 1 |
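# Hedged sketch of the special-token layout build_inputs_with_special_tokens
# produces in the snippet above the marker; the checkpoint name is illustrative.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
ids = tok("Hello world")["input_ids"]
assert ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id  # <s> ... </s>
pair = tok("first", "second")["input_ids"]
assert pair.count(tok.eos_token_id) == 3  # pairs join as <s> A </s></s> B </s>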
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowercase = logging.getLogger(__name__)
class a_ ( UpperCAmelCase__ ):
def __init__( self : str , __lowerCAmelCase : str=-1 ):
# in NER datasets, the last column is usually reserved for NER label
__snake_case = label_idx
def lowercase__ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[Split, str] ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case = mode.value
__snake_case = os.path.join(__lowerCAmelCase , F'{mode}.txt' )
__snake_case = 1
__snake_case = []
with open(__lowerCAmelCase , encoding='utf-8' ) as f:
__snake_case = []
__snake_case = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
__snake_case = []
__snake_case = []
else:
__snake_case = line.split(' ' )
words.append(splits[0] )
if len(__lowerCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
return examples
def lowercase__ ( self : str , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ):
__snake_case = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__lowerCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__snake_case = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__lowerCAmelCase )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def lowercase__ ( self : Optional[int] , __lowerCAmelCase : str ):
if path:
with open(__lowerCAmelCase , 'r' ) as f:
__snake_case = f.read().splitlines()
if "O" not in labels:
__snake_case = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a_ ( UpperCAmelCase__ ):
def __init__( self : List[Any] ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def lowercase__ ( self : int , __lowerCAmelCase : str ):
if path:
with open(__lowerCAmelCase , 'r' ) as f:
__snake_case = f.read().splitlines()
if "O" not in labels:
__snake_case = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class a_ ( UpperCAmelCase__ ):
def lowercase__ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Union[Split, str] ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case = mode.value
__snake_case = os.path.join(__lowerCAmelCase , F'{mode}.txt' )
__snake_case = 1
__snake_case = []
with open(__lowerCAmelCase , encoding='utf-8' ) as f:
for sentence in parse_incr(__lowerCAmelCase ):
__snake_case = []
__snake_case = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
return examples
def lowercase__ ( self : List[str] , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ):
__snake_case = 0
for sentence in parse_incr(__lowerCAmelCase ):
__snake_case = preds_list[example_id]
__snake_case = ''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(__lowerCAmelCase )
example_id += 1
def lowercase__ ( self : List[str] , __lowerCAmelCase : str ):
if path:
with open(__lowerCAmelCase , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
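# A compact, self-contained illustration of the CoNLL-style parsing loop
# above: blank lines and -DOCSTART- markers delimit sentences, the first
# column is the token, and the chosen label column holds the tag.
raw = "-DOCSTART- O\n\nEU B-ORG\nrejects O\n\nGerman B-MISC\n"
examples, words, labels = [], [], []
for line in raw.splitlines(keepends=True):
    if line.startswith("-DOCSTART-") or line in ("", "\n"):
        if words:
            examples.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(" ")
        words.append(splits[0])
        labels.append(splits[-1].replace("\n", ""))
if words:
    examples.append((words, labels))
print(examples)  # [(['EU', 'rejects'], ['B-ORG', 'O']), (['German'], ['B-MISC'])]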
| 356 |
'''simple docstring'''
import numpy
class a_ :
def __init__( self : Any , __lowerCAmelCase : numpy.ndarray , __lowerCAmelCase : numpy.ndarray ):
__snake_case = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
__snake_case = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__snake_case = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__snake_case = numpy.random.rand(3 , 1 )
# Real output values provided.
__snake_case = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__snake_case = numpy.zeros(output_array.shape )
def lowercase__ ( self : Optional[Any] ):
__snake_case = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__snake_case = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__snake_case = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowercase__ ( self : List[str] ):
__snake_case = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__snake_case = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__snake_case = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : numpy.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : bool ):
for iteration in range(1 , iterations + 1 ):
__snake_case = self.feedforward()
self.back_propagation()
if give_loss:
__snake_case = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'Iteration {iteration} Loss: {loss}' )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : numpy.ndarray ):
__snake_case = input_arr
__snake_case = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__snake_case = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__snake_case = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCamelCase__ ( a ):
return 1 / (1 + numpy.exp(-value ))
def lowerCamelCase__ ( a ):
return (value) * (1 - (value))
def lowerCamelCase__ ( ):
__snake_case = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__snake_case = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__snake_case = TwoHiddenLayerNeuralNetwork(
input_array=a , output_array=a )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a , iterations=10 , give_loss=a )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
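# A hedged, de-obfuscated sketch of the forward pass described above, with the
# same layer sizes (3 -> 4 -> 3 -> 1) and sigmoid activations; the names here
# (w1, w2, w3, forward_pass) are illustrative, not taken from the class.
import numpy as np

def sigmoid_fn(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)
w1 = rng.random((3, 4))  # input layer (3) -> first hidden layer (4)
w2 = rng.random((4, 3))  # first hidden layer (4) -> second hidden layer (3)
w3 = rng.random((3, 1))  # second hidden layer (3) -> output node (1)

def forward_pass(x):
    h1 = sigmoid_fn(x @ w1)
    h2 = sigmoid_fn(h1 @ w2)
    return sigmoid_fn(h2 @ w3)

print(forward_pass(np.array([[1.0, 1.0, 1.0]])).shape)  # (1, 1)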
| 356 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a : int = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
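# The _LazyModule above defers heavy imports until attribute access. A rough
# standalone approximation using PEP 562 module-level __getattr__ (this sketch
# assumes it lives inside a package module; the names are illustrative):
import importlib

_lazy_import_structure = {"configuration_swiftformer": ["SwiftFormerConfig"]}

def __getattr__(name):
    for submodule, exports in _lazy_import_structure.items():
        if name in exports:
            # import the submodule only now, on first access
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")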
| 719 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
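# A hedged, standalone sketch of the dtype-defaulting rule implemented above:
# integer numpy data defaults to torch.int64 and floating data to
# torch.float32, unless the caller's tensor kwargs override it.
import numpy as np
import torch

def to_tensor_with_defaults(value, **tensor_kwargs):
    arr = np.asarray(value)
    default = {}
    if np.issubdtype(arr.dtype, np.integer):
        default = {"dtype": torch.int64}
    elif np.issubdtype(arr.dtype, np.floating):
        default = {"dtype": torch.float32}
    return torch.tensor(arr, **{**default, **tensor_kwargs})

print(to_tensor_with_defaults(np.array([1, 2, 3])).dtype)   # torch.int64
print(to_tensor_with_defaults(np.array([1.5, 2.5])).dtype)  # torch.float32
print(to_tensor_with_defaults(np.array([1, 2]), dtype=torch.int32).dtype)  # torch.int32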
| 10 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __snake_case ( _lowercase):
snake_case__ : torch.FloatTensor
snake_case__ : torch.FloatTensor
class __snake_case ( _lowercase , _lowercase):
snake_case__ : int = 1
@register_to_config
def __init__( self : str , __lowerCAmelCase : int = 2_0_0_0 , __lowerCAmelCase : float = 0.15 , __lowerCAmelCase : float = 0.01 , __lowerCAmelCase : float = 13_48.0 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : int = 1 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = sigma_max
# setable values
_lowerCamelCase : Dict = None
self.set_sigmas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[int] = None ):
"""simple docstring"""
return sample
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : Union[str, torch.device] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCamelCase : Optional[int] = torch.linspace(1 , __lowerCAmelCase , __lowerCAmelCase , device=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None , __lowerCAmelCase : float = None ):
"""simple docstring"""
_lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCamelCase : int = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCamelCase : Optional[int] = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) , math.log(__lowerCAmelCase ) , __lowerCAmelCase ) )
_lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
_lowerCamelCase : Tuple = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we use cpu, which is the default with cuda
_lowerCamelCase : Optional[int] = timesteps.to(self.discrete_sigmas.device )
_lowerCamelCase : Any = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCamelCase : int = self.get_adjacent_sigma(__lowerCAmelCase , __lowerCAmelCase ).to(sample.device )
_lowerCamelCase : Any = torch.zeros_like(__lowerCAmelCase )
_lowerCamelCase : Any = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCamelCase : Union[str, Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCamelCase : List[Any] = diffusion.unsqueeze(-1 )
_lowerCamelCase : int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term
_lowerCamelCase : List[str] = randn_tensor(
sample.shape , layout=sample.layout , generator=__lowerCAmelCase , device=sample.device , dtype=sample.dtype )
_lowerCamelCase : List[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCamelCase : int = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase , prev_sample_mean=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCamelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_lowerCamelCase : str = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCamelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCamelCase : Union[str, Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_lowerCamelCase : str = step_size.unsqueeze(-1 )
_lowerCamelCase : Any = sample + step_size * model_output
_lowerCamelCase : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : torch.FloatTensor , ):
"""simple docstring"""
_lowerCamelCase : Dict = timesteps.to(original_samples.device )
_lowerCamelCase : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
_lowerCamelCase : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_lowerCamelCase : int = noise + original_samples
return noisy_samples
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
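# A standalone sketch of the geometric sigma schedule set up above:
# sigma(t) = sigma_min * (sigma_max / sigma_min) ** t, for t going from 1 down
# to sampling_eps, matching the config defaults (0.01 and 1348.0).
import torch

sigma_min, sigma_max, sampling_eps, num_steps = 0.01, 1348.0, 1e-5, 5
timesteps = torch.linspace(1, sampling_eps, num_steps)
sigmas = sigma_min * (sigma_max / sigma_min) ** timesteps
print(sigmas)  # starts at sigma_max (t = 1) and decays toward sigma_min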
| 83 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
        SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase (FlavaImageProcessor ):
    """simple docstring"""
    def __init__( self : List[str], *_UpperCAmelCase : List[Any], **_SCREAMING_SNAKE_CASE : str ) -> None:
        """simple docstring"""
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.", FutureWarning, )
        super().__init__(*_UpperCAmelCase, **_SCREAMING_SNAKE_CASE )
| 713 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "data2vec-audio"
def __init__( self : List[str], _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : str=7_6_8, _UpperCAmelCase : Dict=1_2, _UpperCAmelCase : List[Any]=1_2, _UpperCAmelCase : Dict=3_0_7_2, _UpperCAmelCase : str="gelu", _UpperCAmelCase : Union[str, Any]=0.1, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Tuple=0.0, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : Any=0.02, _UpperCAmelCase : Tuple=1E-5, _UpperCAmelCase : Union[str, Any]="gelu", _UpperCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2), _UpperCAmelCase : str=(5, 2, 2, 2, 2, 2, 2), _UpperCAmelCase : int=(1_0, 3, 3, 3, 3, 2, 2), _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : List[str]=1_6, _UpperCAmelCase : Any=1_9, _UpperCAmelCase : List[Any]=5, _UpperCAmelCase : Dict=0.05, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : Optional[Any]=0.0, _UpperCAmelCase : List[Any]=1_0, _UpperCAmelCase : Optional[Any]=0, _UpperCAmelCase : Optional[Any]="sum", _UpperCAmelCase : str=False, _UpperCAmelCase : Any=False, _UpperCAmelCase : Optional[int]=2_5_6, _UpperCAmelCase : Optional[int]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0), _UpperCAmelCase : int=(5, 3, 3, 1, 1), _UpperCAmelCase : Optional[int]=(1, 2, 3, 1, 1), _UpperCAmelCase : Optional[Any]=5_1_2, _UpperCAmelCase : int=0, _UpperCAmelCase : Tuple=1, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : List[str]=False, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Any=2, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Dict=None, **_UpperCAmelCase : Any, ) -> Any:
"""simple docstring"""
super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract_activation
SCREAMING_SNAKE_CASE__ : Optional[int] = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = conv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ : Tuple = conv_pos_kernel_size
SCREAMING_SNAKE_CASE__ : List[str] = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout
SCREAMING_SNAKE_CASE__ : int = attention_dropout
SCREAMING_SNAKE_CASE__ : Dict = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_proj_dropout
SCREAMING_SNAKE_CASE__ : List[str] = final_dropout
SCREAMING_SNAKE_CASE__ : Tuple = layerdrop
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : int = mask_time_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask_time_length
SCREAMING_SNAKE_CASE__ : List[str] = mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Tuple = mask_feature_prob
SCREAMING_SNAKE_CASE__ : Dict = mask_feature_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE__ : Optional[int] = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ : List[Any] = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE__ : int = add_adapter
SCREAMING_SNAKE_CASE__ : Dict = adapter_kernel_size
SCREAMING_SNAKE_CASE__ : Optional[int] = adapter_stride
SCREAMING_SNAKE_CASE__ : Dict = num_adapter_layers
SCREAMING_SNAKE_CASE__ : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Any = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = xvector_output_dim
@property
def A_ ( self : List[str] ) -> str:
"""simple docstring"""
return math.prod(self.conv_stride )
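# The property above reports the feature extractor's total downsampling factor
# as the product of the conv strides; with the defaults (5, 2, 2, 2, 2, 2, 2),
# one output frame covers 320 input samples. A quick check:
import math

print(math.prod((5, 2, 2, 2, 2, 2, 2)))  # 320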
| 157 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A =logging.get_logger(__name__)
__A ={'tokenizer_file': 'tokenizer.json'}
__A ={
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class _snake_case ( a__ ):
lowerCAmelCase :List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase :int = ['''input_ids''', '''attention_mask''']
lowerCAmelCase :Optional[int] = None
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="<unk>" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<pad>" , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ):
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , unk_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , pad_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase , **_lowerCamelCase , )
UpperCAmelCase__ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , _lowerCamelCase) != add_prefix_space:
UpperCAmelCase__ : List[Any] = getattr(_lowerCamelCase , pre_tok_state.pop("""type"""))
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : int = pre_tok_class(**_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = add_prefix_space
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : Dict = kwargs.get("""is_split_into_words""" , _lowerCamelCase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""")
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : int = kwargs.get("""is_split_into_words""" , _lowerCamelCase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
""" pretokenized inputs.""")
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : Dict = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase)
return tuple(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase) + [self.eos_token_id])
if len(_lowerCamelCase) > self.model_max_length:
UpperCAmelCase__ : int = input_ids[-self.model_max_length :]
return input_ids | 407 |
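# A hedged illustration of the conversation flattening above: each turn is
# encoded and terminated with eos, then the whole sequence is truncated from
# the left to the model's maximum length (the ids below are placeholders).
eos_token_id, model_max_length = 2, 8
encoded_turns = [[10, 11], [12], [13, 14, 15]]

input_ids = []
for turn in encoded_turns:
    input_ids.extend(turn + [eos_token_id])
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
print(input_ids)  # [11, 2, 12, 2, 13, 14, 15, 2]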
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__A ={
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _UpperCamelCase ( UpperCamelCase__ ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
if args.student_type == "roberta":
UpperCAmelCase__ : Optional[Any] = False
elif args.student_type == "gpt2":
UpperCAmelCase__ : str = False
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
if args.student_type == "roberta":
UpperCAmelCase__ : str = False
def _UpperCamelCase ( ):
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=UpperCamelCase__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=UpperCamelCase__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=UpperCamelCase__ , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=UpperCamelCase__ , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=UpperCamelCase__ , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=UpperCamelCase__ , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=UpperCamelCase__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=UpperCamelCase__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=UpperCamelCase__ , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=UpperCamelCase__ , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=UpperCamelCase__ , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=UpperCamelCase__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=UpperCamelCase__ , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=UpperCamelCase__ , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=UpperCamelCase__ , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=UpperCamelCase__ , default=5_0 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=UpperCamelCase__ , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=UpperCamelCase__ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5e-4 , type=UpperCamelCase__ , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=UpperCamelCase__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=UpperCamelCase__ , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=UpperCamelCase__ , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=UpperCamelCase__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=UpperCamelCase__ , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=UpperCamelCase__ , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=UpperCamelCase__ , default=5_6 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=UpperCamelCase__ , default=5_0_0 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=UpperCamelCase__ , default=4_0_0_0 , help="""Checkpoint interval.""" )
UpperCAmelCase__ : Dict = parser.parse_args()
sanity_checks(UpperCamelCase__ )
# ARGS #
init_gpu_params(UpperCamelCase__ )
set_seed(UpperCamelCase__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    """ it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(UpperCamelCase__ ) , UpperCamelCase__ , indent=4 )
git_log(args.dump_path )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = MODEL_CLASSES[args.student_type]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase__ : Optional[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase__ : Any = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase__ : Any = tokenizer.all_special_tokens.index(UpperCamelCase__ )
UpperCAmelCase__ : str = tokenizer.all_special_ids[idx]
logger.info(f'''Special tokens {special_tok_ids}''' )
UpperCAmelCase__ : Any = special_tok_ids
UpperCAmelCase__ : str = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
with open(args.data_file , """rb""" ) as fp:
UpperCAmelCase__ : str = pickle.load(UpperCamelCase__ )
if args.mlm:
logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , """rb""" ) as fp:
UpperCAmelCase__ : int = pickle.load(UpperCamelCase__ )
UpperCAmelCase__ : Optional[Any] = np.maximum(UpperCamelCase__ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase__ : Tuple = 0.0 # do not predict special tokens
UpperCAmelCase__ : Any = torch.from_numpy(UpperCamelCase__ )
else:
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[int] = LmSeqsDataset(params=UpperCamelCase__ , data=UpperCamelCase__ )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
UpperCAmelCase__ : int = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase__ : Optional[int] = True
if args.student_pretrained_weights is not None:
logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
UpperCAmelCase__ : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCamelCase__ )
else:
UpperCAmelCase__ : Any = student_model_class(UpperCamelCase__ )
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCAmelCase__ : int = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCamelCase__ )
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(UpperCamelCase__ , UpperCamelCase__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(UpperCamelCase__ , UpperCamelCase__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase__ : Optional[Any] = Distiller(
params=UpperCamelCase__ , dataset=UpperCamelCase__ , token_probs=UpperCamelCase__ , student=UpperCamelCase__ , teacher=UpperCamelCase__ )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main() | 407 | 1 |
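# A hedged sketch of the MLM token-sampling smoothing applied above: masking
# probability is proportional to count ** (-mlm_smoothing), as in word2vec
# subsampling, so rare tokens are masked more often (counts are illustrative).
import numpy as np

token_counts = np.array([1_000_000, 10_000, 100, 1])
token_probs = np.maximum(token_counts, 1) ** -0.7  # mlm_smoothing = 0.7
token_probs /= token_probs.sum()  # normalize into a sampling distribution
print(token_probs)  # rare tokens receive comparatively more probability mass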
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50 ) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
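# A memoized, top-down restatement of the recurrence above (a sketch, not part
# of the original solution): ways(k) counts fillings of a row of length k with
# black blocks of length >= m, any two blocks separated by >= 1 empty cell.
from functools import lru_cache

def count_fillings(m: int, n: int) -> int:
    @lru_cache(maxsize=None)
    def ways(k: int) -> int:
        total = 1  # the all-empty filling
        for length in range(m, k + 1):
            for start in range(k - length):  # block followed by an empty cell
                total += ways(k - start - length - 1)
            total += 1  # block flush against the right edge
        return total
    return ways(n)

print(count_fillings(3, 30))  # 1_348_749 > 1_000_000, so solution(3) would be 30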
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 40 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of using Accelerate
# with LocalSGD, a method that synchronizes model parameters
# every K batches. It is different from, but complementary to,
# gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A = 1_6
A = 3_2
def lowerCamelCase ( UpperCamelCase : Accelerator , UpperCamelCase : int = 16 ) -> Any:
_lowerCamelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
_lowerCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCamelCase : Tuple ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_snake_case , max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCamelCase = datasets.map(
_snake_case , batched=_snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels', which is the column name
    # the models in the transformers library expect
_lowerCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCamelCase = 16
elif accelerator.mixed_precision != "no":
_lowerCamelCase = 8
else:
_lowerCamelCase = None
return tokenizer.pad(
_snake_case , padding='longest' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
_lowerCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
_lowerCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A = mocked_dataloaders # noqa: F811
def lowerCamelCase ( UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ) -> Union[str, Any]:
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _snake_case ) == "1":
_lowerCamelCase = 2
# New Code #
_lowerCamelCase = int(args.gradient_accumulation_steps )
_lowerCamelCase = int(args.local_sgd_steps )
# Initialize accelerator
_lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase = config['lr']
_lowerCamelCase = int(config['num_epochs'] )
_lowerCamelCase = int(config['seed'] )
_lowerCamelCase = int(config['batch_size'] )
_lowerCamelCase = evaluate.load('glue' , 'mrpc' )
set_seed(_snake_case )
_lowerCamelCase , _lowerCamelCase = get_dataloaders(_snake_case , _snake_case )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCamelCase = model.to(accelerator.device )
# Instantiate optimizer
_lowerCamelCase = AdamW(params=model.parameters() , lr=_snake_case )
# Instantiate scheduler
_lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=_snake_case , num_warmup_steps=1_00 , num_training_steps=(len(_snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = accelerator.prepare(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
with LocalSGD(
accelerator=_snake_case , model=_snake_case , local_sgd_steps=_snake_case , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # Gradient accumulation is currently not supported (or advised) on TPUs, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_snake_case ):
_lowerCamelCase = model(**_snake_case )
_lowerCamelCase = output.loss
accelerator.backward(_snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase = model(**_snake_case )
_lowerCamelCase = outputs.logits.argmax(dim=-1 )
_lowerCamelCase , _lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_snake_case , references=_snake_case , )
_lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _snake_case )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
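# Illustrative launch command (added sketch; the script filename is an assumption):
#
#   accelerate launch local_sgd.py --mixed_precision fp16 --gradient_accumulation_steps 2 --local_sgd_steps 8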
if __name__ == "__main__":
    main()
| 544 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
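# Hedged usage sketch (not part of the original file): a concrete task module
# subclasses BaseTransformer, implements `get_dataloader` plus the validation
# hooks, and is then driven end-to-end by `generic_train`:
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)  # hypothetical subclass of BaseTransformer
#   trainer = generic_train(model, args)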
| 242 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
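# Illustrative downstream usage (added sketch; the checkpoint name is a real hub id,
# but this snippet is not part of the module itself). Thanks to the lazy module,
# the torch-dependent submodules are only imported when actually accessed:
#
#   from transformers import Blip2Processor, Blip2ForConditionalGeneration
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")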
| 712 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (nested) state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes the layout of `param` so the query/key/value splits are grouped the way
    # transformers expects, depending on the Megatron-LM checkpoint version.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
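# Added self-check sketch (not from the original script): for checkpoint_version 2.0,
# a [num_heads * num_splits * hidden_size, :] tensor is regrouped so the Q/K/V splits
# become the leading blocks, while the overall shape is preserved.
#
#   dummy = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2 * 3 * 4, 5)
#   out = fix_query_key_value_ordering(dummy, 2.0, num_splits=3, num_heads=2, hidden_size=4)
#   assert out.shape == dummy.shape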
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowercase: List[str] = '''gelu_fast'''
elif ds_args.openai_gelu:
_lowercase: Optional[int] = '''gelu_new'''
else:
_lowercase: Any = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
_lowercase: Optional[int] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
_lowercase: List[str] = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=_UpperCamelCase , summary_activation=_UpperCamelCase , summary_proj_to_labels=_UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_UpperCamelCase , use_cache=_UpperCamelCase , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
_lowercase: Optional[int] = GPTaConfig.from_json_file(args.config_file )
_lowercase: str = ['''GPT2LMHeadModel''']
# Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
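# Example invocation (illustrative; the checkpoint path is a placeholder):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_lm_345m_v0.0.zip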
| 272 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
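# Note added for clarity: the LAVIS vision encoder keeps separate q/v biases with the
# k bias fixed at zero, while the HF implementation uses a single fused qkv projection,
# hence the (q_bias, zeros, v_bias) concatenation above.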
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_instructblip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
        help='Name of the InstructBLIP model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
    convert_instructblip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
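# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_instructblip_original_to_pytorch.py --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl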
| 412 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    # Renames the fairseq MusicGen state dict to HF module names and splits off the
    # encoder-decoder projection weights into their own dict.
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
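# Quick sanity sketch (added, not part of the original script):
#
#   config = decoder_config_from_checkpoint("small")
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 24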
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
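# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_musicgen_to_hf.py --checkpoint small --pytorch_dump_folder ./musicgen-small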
| 412 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
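# Worked example (added for clarity, mirroring the parametrized cases above):
# _distribute_shards(num_shards=10, max_num_jobs=3) -> [range(0, 4), range(4, 7), range(7, 10)],
# i.e. shards are split as evenly as possible, with earlier jobs taking the remainder.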
| 122 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> float:
return base * power(__lowerCamelCase , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
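# Worked example (added): power(3, 4) == 3 * power(3, 3) == ... == 81, and an input
# exponent of -4 is evaluated as 1 / power(3, 4) == 1 / 81 by the block above.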
| 122 | 1 |
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : subreddit to query
    limit : number of posts to fetch
    age : one of "new", "top" or "hot"
    wanted_data : fetch only the listed fields for each post
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
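# Illustrative call (added sketch; performs a live network request):
#
#   get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
#   # -> {0: {"title": ..., "url": ...}, 1: {"title": ..., "url": ...}}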
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 615 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering over `vectors` using TensorFlow 1.x graph-mode ops."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
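# Usage sketch (added; requires legacy TensorFlow APIs such as tf.Session and tf.sub):
#
#   points = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.8, 8.2]]
#   centroids, assignments = TFKMeansCluster(points, 2)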
| 258 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCamelCase : Any = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
a__ : Dict = []
            if value.dtype is np.dtype(np.float64 ):
                a__ : Optional[int] = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def _UpperCamelCase( self : int , lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ):
a__ : Optional[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a__ : Tuple = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a__ : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a__ : Any = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            a__ : Optional[int] = np.ones(len(lowerCamelCase__ ) , dtype=np.int32 )
if needs_to_be_padded:
a__ : List[str] = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
a__ : Tuple = np.pad(
processed_features["attention_mask"] , (0, difference) )
a__ : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a__ : List[Any] = np.pad(
lowerCamelCase__ , lowerCamelCase__ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a__ : List[str] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
a__ : Any = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a__ : List[str] = np.pad(
lowerCamelCase__ , lowerCamelCase__ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
a__ : Optional[Any] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a__ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a__ : List[str] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
a__ : Optional[int] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a__ : List[str] = processed_features["attention_mask"][:max_length]
return processed_features
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Tuple=None ):
# Get padding strategy
if padding is not False:
if padding is True:
a__ : Tuple = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__ : Tuple = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__ : Optional[Any] = padding
else:
a__ : Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
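
# The core right-padding move in `_pad` above reduces to one `np.pad` call per
# feature; a standalone sketch (the shapes and the `padding_value` of 0.0 are
# illustrative assumptions, not values taken from the class above).
import numpy as np

def pad_right(values: np.ndarray, max_length: int, padding_value: float = 0.0):
    difference = max_length - len(values)
    # The mask marks real positions with 1 and padded positions with 0
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask

padded, mask = pad_right(np.array([0.1, 0.2, 0.3]), max_length=5)
# padded -> [0.1, 0.2, 0.3, 0.0, 0.0]; mask -> [1, 1, 1, 0, 0]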
| 151 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( A__ ):
"""simple docstring"""
_lowercase = (DPMSolverSinglestepScheduler,)
_lowercase = (('num_inference_steps', 2_5),)
def _UpperCamelCase( self : Optional[int] , **lowerCamelCase__ : Tuple ):
a__ : Any = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**lowerCamelCase__ )
return config
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple=0 , **lowerCamelCase__ : Union[str, Any] ):
a__ : Optional[Any] = dict(self.forward_default_kwargs )
a__ : Any = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Any = self.dummy_sample
a__ : int = 0.1 * sample
a__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : List[str] = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
a__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
a__, a__ : Any = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : List[str] = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Any ):
pass
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any]=0 , **lowerCamelCase__ : Optional[int] ):
a__ : Tuple = dict(self.forward_default_kwargs )
a__ : Tuple = kwargs.pop("num_inference_steps" , lowerCamelCase__ )
a__ : Union[str, Any] = self.dummy_sample
a__ : Tuple = 0.1 * sample
a__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a__ : Dict = self.get_scheduler_config()
a__ : List[str] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
a__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
a__ : int = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
a__ : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
a__ : Dict = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Union[str, Any] ):
if scheduler is None:
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : Optional[Any] = self.get_scheduler_config(**lowerCamelCase__ )
a__ : str = scheduler_class(**lowerCamelCase__ )
a__ : List[Any] = self.scheduler_classes[0]
a__ : int = self.get_scheduler_config(**lowerCamelCase__ )
a__ : Any = scheduler_class(**lowerCamelCase__ )
a__ : Any = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Any = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def _UpperCamelCase( self : str ):
a__ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : Optional[Any] = 50
a__ : List[str] = self.dummy_model()
a__ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
a__ : List[str] = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a__ : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
a__ : int = self.full_loop(scheduler=lowerCamelCase__ )
a__ : str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
a__ : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
a__ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
a__ : int = UniPCMultistepScheduler.from_config(scheduler.config )
a__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a__ : Optional[Any] = self.full_loop(scheduler=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="dpmsolver++" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def _UpperCamelCase( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
a__ : Dict = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def _UpperCamelCase( self : str ):
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _UpperCamelCase( self : Union[str, Any] ):
self.check_over_configs(variance_type=lowerCamelCase__ )
self.check_over_configs(variance_type="learned_range" )
def _UpperCamelCase( self : Any ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def _UpperCamelCase( self : Optional[int] ):
a__ : Optional[int] = self.full_loop()
a__ : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : List[str] = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _UpperCamelCase( self : int ):
a__ : List[Any] = self.full_loop(prediction_type="v_prediction" )
a__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _UpperCamelCase( self : Tuple ):
a__ : Any = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=lowerCamelCase__ )
a__ : List[str] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _UpperCamelCase( self : str ):
a__ : Union[str, Any] = self.scheduler_classes[0]
a__ : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
a__ : int = scheduler_class(**lowerCamelCase__ )
a__ : int = 10
a__ : int = self.dummy_model()
a__ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
a__ : Dict = model(lowerCamelCase__ , lowerCamelCase__ )
a__ : Optional[Any] = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        assert sample.dtype == torch.float16
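
# The `full_loop` helpers above follow the standard diffusers sampling pattern;
# a condensed sketch. The random `model_output` is a stand-in for a real
# denoising model call, not the tests' `dummy_model`.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)  # stand-in for an initial noisy latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)
    sample = scheduler.step(model_output, t, sample).prev_sample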
| 151 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : bool = True , __magic_name__ : float = math.inf , __magic_name__ : float = -math.inf , __magic_name__ : float = math.inf , __magic_name__ : float = -math.inf , __magic_name__ : bool = False , __magic_name__ : float = 1_00 , __magic_name__ : float = 0.01 , __magic_name__ : float = 1 , ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = False
snake_case__ : List[Any] = search_prob
snake_case__ : Union[str, Any] = start_temperate
snake_case__ : Union[str, Any] = []
snake_case__ : Any = 0
snake_case__ : Dict = None
while not search_end:
snake_case__ : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
snake_case__ : Any = current_state
scores.append(__magic_name__ )
iterations += 1
snake_case__ : str = None
snake_case__ : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
snake_case__ : Any = random.randint(0 , len(__magic_name__ ) - 1 ) # picking a random neighbor
snake_case__ : Optional[Any] = neighbors.pop(__magic_name__ )
snake_case__ : str = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
snake_case__ : int = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
snake_case__ : Dict = picked_neighbor
else:
snake_case__ : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
snake_case__ : List[str] = picked_neighbor
snake_case__ : Any = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
snake_case__ : List[Any] = True
else:
snake_case__ : Optional[int] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__magic_name__ ) , __magic_name__ )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A_ : Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
A_ : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : Union[str, Any] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
return (3 * x**2) - (6 * y)
A_ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : Optional[int] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
A_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
F'{local_min.score()}'
)
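
    # The acceptance rule in the loop above is the Metropolis criterion,
    # p = e ** (change / current_temp) for a worsening move; a quick numeric check:
    import math

    print(math.e ** (-2.0 / 100))  # ~0.980: a small worsening is usually accepted while hot
    print(math.e ** (-2.0 / 0.5))  # ~0.018: the same move is rarely accepted once cooled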
| 38 |
from collections import Counter
from timeit import timeit
def _A ( SCREAMING_SNAKE_CASE : str = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def _A ( SCREAMING_SNAKE_CASE : str = "" ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return True
a__ : Any =input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
a__ : dict[str, int] ={}
for character in lower_case_input_str:
a__ : Union[str, Any] =character_freq_dict.get(SCREAMING_SNAKE_CASE , 0 ) + 1
a__ : Union[str, Any] =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _A ( SCREAMING_SNAKE_CASE : str = "" ):
"""simple docstring"""
print("\nFor string = " , SCREAMING_SNAKE_CASE , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(SCREAMING_SNAKE_CASE ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(SCREAMING_SNAKE_CASE ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
UpperCAmelCase : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 563 | 0 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_UpperCamelCase : Tuple ="\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_UpperCamelCase : List[Any] ="\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_UpperCamelCase : int ="\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def _UpperCAmelCase ( self , a , a , a=None ) -> str:
return {
"matthews_correlation": float(matthews_corrcoef(a , a , sample_weight=a ) ),
}
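
# The coefficient this metric wraps can be checked by hand from the 2x2
# confusion matrix: MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
from math import sqrt
from sklearn.metrics import matthews_corrcoef

references = [1, 1, 1, 0, 0, 0]
predictions = [1, 1, 0, 0, 0, 1]
tp, tn, fp, fn = 2, 2, 1, 1  # counted from the two lists above
by_hand = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert abs(by_hand - matthews_corrcoef(references, predictions)) < 1e-9  # both 1/3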
| 710 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
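
# The `_LazyModule` indirection above defers the heavy torch/flax imports until a
# name is first accessed. The same idea in miniature via module-level
# `__getattr__` (PEP 562); an illustrative pattern with hypothetical names, not
# transformers' internal implementation.
import importlib

_lazy_imports = {"heavy_module": ["HeavyClass"]}  # hypothetical submodule and name
_attr_to_module = {attr: mod for mod, attrs in _lazy_imports.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")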
| 645 | 0 |
import warnings
from .generation import TFGenerationMixin
class lowerCAmelCase__ ( __lowercase ):
# warning at import time
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , __lowercase , )
| 612 |
from collections.abc import Sequence
def __A(lowerCAmelCase = None ) -> int:
"""simple docstring"""
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
_UpperCamelCase = nums[0]
for i in range(1 , len(lowerCAmelCase ) ):
_UpperCamelCase = nums[i]
_UpperCamelCase = max(lowerCAmelCase , ans + num , lowerCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase__ = int(input("Enter number of elements : ").strip())
lowerCamelCase__ = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
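
    # The recurrence above keeps, at each element, the best of: the running answer
    # unchanged (skip the element), the running answer plus the element, or the
    # element alone. A short trace with descriptive stand-in names:
    nums = [-2, 1, -3, 4]
    ans = nums[0]                       # -2
    for num in nums[1:]:
        ans = max(ans, ans + num, num)  # 1, then 1, then 5
    print(ans)                          # 5, from the subsequence [1, 4]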
| 612 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple , lowercase : Tuple , lowercase : Optional[int]=1_3 , lowercase : List[str]=7 , lowercase : Optional[Any]=True , lowercase : int=True , lowercase : int=False , lowercase : Union[str, Any]=True , lowercase : Optional[int]=9_9 , lowercase : Any=6_4 , lowercase : List[str]=5 , lowercase : Tuple=4 , lowercase : List[Any]=6_4 , lowercase : Union[str, Any]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Any=5_1_2 , lowercase : str=1_6 , lowercase : Dict=2 , lowercase : Optional[Any]=0.0_2 , lowercase : Any=3 , lowercase : List[str]=4 , lowercase : Tuple=None , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def A ( self : Dict ) -> str:
'''simple docstring'''
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def A ( self : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : Dict , lowercase : List[Any] , lowercase : List[str] , lowercase : Tuple , lowercase : Dict , lowercase : str ) -> int:
'''simple docstring'''
UpperCamelCase__ = MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase__ = model(snake_case__ , snake_case__ )
UpperCamelCase__ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : int , lowercase : Dict , lowercase : Tuple , lowercase : Tuple , lowercase : int , lowercase : int , lowercase : Dict ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase__ = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : List[Any] , lowercase : int , lowercase : List[Any] , lowercase : List[str] , lowercase : str , lowercase : Optional[Any] , lowercase : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase__ = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : int , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Any , lowercase : int , lowercase : str , lowercase : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase__ = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
(UpperCamelCase__) = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,unittest.TestCase ):
'''simple docstring'''
__a : Dict = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__a : Union[str, Any] = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Union[str, Any] = False
__a : Union[str, Any] = True
def A ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = MPNetModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def A ( self : Any ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def A ( self : List[str] ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def A ( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
UpperCamelCase__ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCamelCase__ = model(snake_case__ )[0]
UpperCamelCase__ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase__ = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
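
# The integration test above mirrors ordinary inference; a minimal end-to-end
# call (downloading the checkpoint from the Hub is assumed to be possible):
import torch
from transformers import AutoTokenizer, MPNetModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
model = MPNetModel.from_pretrained("microsoft/mpnet-base")
inputs = tokenizer("MPNet combines masked and permuted pre-training.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])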
| 719 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCamelCase_ : List[str] = TypeVar('''T''')
lowerCamelCase_ : Optional[int] = TypeVar('''U''')
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Dict , lowercase : T | None , lowercase : U | None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = key
UpperCamelCase__ = val
UpperCamelCase__ = None
UpperCamelCase__ = None
def __repr__( self : List[Any] ) -> str:
'''simple docstring'''
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> None:
'''simple docstring'''
UpperCamelCase__ = DoubleLinkedListNode(lowercase , lowercase )
UpperCamelCase__ = DoubleLinkedListNode(lowercase , lowercase )
UpperCamelCase__ , UpperCamelCase__ = self.rear, self.head
def __repr__( self : int ) -> str:
'''simple docstring'''
UpperCamelCase__ = ["""DoubleLinkedList"""]
UpperCamelCase__ = self.head
while node.next is not None:
rep.append(str(lowercase ) )
UpperCamelCase__ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase )
def A ( self : str , lowercase : DoubleLinkedListNode[T, U] ) -> None:
'''simple docstring'''
UpperCamelCase__ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCamelCase__ = node
UpperCamelCase__ = previous
UpperCamelCase__ = node
UpperCamelCase__ = self.rear
def A ( self : Any , lowercase : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
UpperCamelCase__ = node.next
UpperCamelCase__ = node.prev
UpperCamelCase__ = None
UpperCamelCase__ = None
return node
class _SCREAMING_SNAKE_CASE ( Generic[T, U] ):
'''simple docstring'''
__a : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : int , lowercase : int ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = DoubleLinkedList()
UpperCamelCase__ = capacity
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = {}
def __repr__( self : Any ) -> str:
'''simple docstring'''
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self : Any , lowercase : T ) -> bool:
'''simple docstring'''
return key in self.cache
def A ( self : Tuple , lowercase : T ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
UpperCamelCase__ = self.cache[key]
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase )
return node.val
self.miss += 1
return None
def A ( self : Dict , lowercase : T , lowercase : U ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCamelCase__ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
UpperCamelCase__ = DoubleLinkedListNode(lowercase , lowercase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCamelCase__ = value
self.list.add(lowercase )
@classmethod
def A ( cls : Optional[int] , lowercase : int = 1_2_8 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(lowercase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCamelCase__ = LRUCache(lowercase )
UpperCamelCase__ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCamelCase__ = func(*lowercase )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase , """cache_info""" , lowercase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
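
    # Usage sketch for the memoizing classmethod (obfuscated to `A` above,
    # `decorator` in the upstream source — assuming that upstream name):
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))           # 832040, repeated subproblems served from the cache
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)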
| 265 | 0 |