| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
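# A minimal usage sketch, added for illustration (not part of the original module);
# it assumes only that the two classes above are importable.
if __name__ == "__main__":
    config = ViTConfig(image_size=384)  # any default can be overridden
    onnx_config = ViTOnnxConfig(config)
    print(config.hidden_size)  # 768
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}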
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 + 5j)}')
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
    )
    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
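    # Added example (not in the original script): the precision parameter can also be
    # tightened below its default of 10**-10, here to find sqrt(2) as the positive
    # root of x**2 - 2.
    print(f'The root of x**2 - 2 = 0 is {newton_raphson("x**2 - 2", 1, precision=10**-12)}')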
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
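# Added example (not in the original file): a 90 degree arc of a circle with
# radius 10 is a quarter of the full circumference, 2*pi*10/4 = 5*pi, approximately 15.708.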
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
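# A usage sketch added for illustration (not in the original file); it assumes
# network access to the Hugging Face Hub and an installed `sentencepiece`:
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("hello world")["input_ids"]        # [CLS] ... [SEP] ids
#   print(tokenizer.convert_ids_to_tokens(ids))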
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
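# Added illustration (not in the original test file): the "clusters" above are RGB
# centroids, and ImageGPT's image processor maps every normalized pixel to the id of
# its nearest cluster. A minimal numpy sketch of that quantization step:
def quantize_to_clusters(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) floats in [-1, 1]; clusters: (k, 3) centroids
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)  # (n, k)
    return distances.argmin(axis=1)  # (n,) cluster ids, i.e. the input_ids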
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
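# A usage sketch added for illustration (not part of the original module); it assumes
# `timm` and `torch` are installed and downloads resnet18 weights on first run:
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=True)
#   model = TimmBackbone(config)
#   feature_maps = model(torch.randn(1, 3, 224, 224)).feature_maps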
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos) -> float:
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos) -> float:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground() -> list:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
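# Added illustration (not in the original script): the PriorityQueue above supports
# re-prioritising an element that is already queued, which multi_a_star relies on
# when a shorter path to a frontier cell is found:
#
#   pq = PriorityQueue()
#   pq.put((0, 0), priority=5)
#   pq.put((0, 0), priority=2)   # updates the existing entry instead of duplicating it
#   assert pq.minkey() == 2 and pq.top_show() == (0, 0)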
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
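# Added illustration (not part of the original __init__.py): with the _LazyModule
# pattern above, submodules are imported only when one of their attributes is first
# accessed, keeping `import transformers` cheap. A hypothetical minimal equivalent
# of the mechanism (names here are invented for illustration):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes by importing the owning submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the submodule that defines it
        self._attr_to_submodule = {v: k for k, vs in import_structure.items() for v in vs}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)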
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
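# Added examples (not in the original file), in doctest style to match the
# doctest.testmod() call above:
#
#   >>> decimal_to_hexadecimal(5)
#   '0x5'
#   >>> decimal_to_hexadecimal(-256)
#   '-0x100'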
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
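# A usage sketch added for illustration (not part of the test file); it mirrors the
# two-stage flow exercised by the integration tests above and assumes a CUDA GPU:
#
#   pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
#   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
#   latents = pipe("a photo of a cat", output_type="latent").images          # low-res latents
#   image = upscaler(prompt="a photo of a cat", image=latents, num_inference_steps=20, guidance_scale=0).images[0]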
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
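# Added illustration (not in the original file): the fairseq offset shifts every
# SentencePiece id by 4 so that ids 0-3 stay reserved for the fairseq special tokens.
# Given a loaded `tokenizer` instance:
#
#   sp_id = tokenizer.sp_model.PieceToId("▁bonjour")
#   assert tokenizer._convert_token_to_id("▁bonjour") == sp_id + tokenizer.fairseq_offset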
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,):
super().__init__()
snake_case_ : Any = only_cross_attention
snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase )
else:
snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
snake_case_ : List[str] = Attention(
query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case_ : str = (
AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
)
snake_case_ : List[str] = Attention(
query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none
else:
snake_case_ : Any = None
snake_case_ : Optional[Any] = None
# 3. Feed-forward
snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase )
# let chunk size default to None
snake_case_ : Optional[int] = None
snake_case_ : Dict = 0
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ):
# Sets chunk feed-forward
snake_case_ : Optional[Any] = chunk_size
snake_case_ : Optional[Any] = dim
def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase )
elif self.use_ada_layer_norm_zero:
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype )
else:
snake_case_ : Optional[int] = self.norma(_UpperCamelCase )
snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case_ : Union[str, Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output
snake_case_ : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case_ : Any = (
self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase )
)
snake_case_ : List[Any] = self.attna(
_UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,)
snake_case_ : Tuple = attn_output + hidden_states
# 3. Feed-forward
snake_case_ : Optional[Any] = self.norma(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case_ : int = torch.cat(
[self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,)
else:
snake_case_ : List[str] = self.ff(_UpperCamelCase )
if self.use_ada_layer_norm_zero:
snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
snake_case_ : Any = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,):
super().__init__()
snake_case_ : Tuple = int(dim * mult )
snake_case_ : Optional[int] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase )
if activation_fn == "gelu-approximate":
snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" )
elif activation_fn == "geglu":
snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase )
elif activation_fn == "geglu-approximate":
snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Dict = nn.ModuleList([] )
# project in
self.net.append(_UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(_UpperCamelCase ) )
# project out
self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_UpperCamelCase ) )
def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ):
for module in self.net:
snake_case_ : Tuple = module(_UpperCamelCase )
return hidden_states
class GELU(nn.Module):
def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ):
super().__init__()
snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Optional[Any] = approximate
def a__ ( self :str ,_UpperCamelCase :int ):
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase ,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ):
snake_case_ : Optional[Any] = self.proj(_UpperCamelCase )
snake_case_ : int = self.gelu(_UpperCamelCase )
return hidden_states
class GEGLU(nn.Module):
def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 )
def a__ ( self :Dict ,_UpperCamelCase :List[str] ):
if gate.device.type != "mps":
return F.gelu(_UpperCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)
def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 )
return hidden_states * self.gelu(_UpperCamelCase )
class ApproximateGELU(nn.Module):
def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ):
snake_case_ : int = self.proj(_UpperCamelCase )
return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ):
super().__init__()
snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Union[str, Any] = nn.SiLU()
snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 )
snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase )
def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ):
snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) )
snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 )
snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ):
super().__init__()
snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : int = nn.SiLU()
snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase )
snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 )
def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ):
snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 )
snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __UpperCamelCase ( nn.Module ):
def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ):
super().__init__()
snake_case_ : Optional[int] = num_groups
snake_case_ : List[Any] = eps
if act_fn is None:
snake_case_ : int = None
else:
snake_case_ : Dict = get_activation(_UpperCamelCase )
snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 )
def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ):
if self.act:
snake_case_ : Any = self.act(_UpperCamelCase )
snake_case_ : Optional[int] = self.linear(_UpperCamelCase )
snake_case_ : Dict = emb[:, :, None, None]
snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 )
snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps )
snake_case_ : List[str] = x * (1 + scale) + shift
return x
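A minimal, self-contained sketch of the GEGLU gating defined above. All classes in this row share the obfuscated name __UpperCamelCase, so readable names are used here; this illustrates the project-then-chunk-then-gate pattern and is not the library's implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GEGLUSketch(nn.Module):
    # Project to 2 * dim_out, split into value and gate, gate with GELU.
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * F.gelu(gate)

# Usage: a (batch, seq, dim_in) tensor comes out with last dimension dim_out.
x = torch.randn(2, 4, 8)
print(GEGLUSketch(8, 16)(x).shape)  # torch.Size([2, 4, 16])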
| 8 | 1 |
def _UpperCAmelCase (number : int , iterations : int ):
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
            out += str(number )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
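A quick sanity call for the function above (_UpperCAmelCase is this row's obfuscated name for the fizz-buzz builder):

# Expected output: "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
print(_UpperCAmelCase(1, 15))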
| 11 |
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase ):
A__ = set_counts
A__ = max(__lowerCamelCase )
A__ = len(__lowerCamelCase )
A__ = [1] * num_sets
A__ = list(range(__lowerCamelCase ) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = self.get_parent(__lowerCamelCase )
A__ = self.get_parent(__lowerCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
A__ = 0
A__ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
A__ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
A__ = 0
A__ = src_parent
A__ = self.set_counts[src_parent]
A__ = max(self.max_set,__lowerCamelCase )
return True
def UpperCamelCase ( self,__lowerCamelCase ):
if self.parents[disj_set] == disj_set:
return disj_set
A__ = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
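In the class above both methods are obfuscated to the same name UpperCamelCase, so the second definition shadows the first. A readable sketch of the same weighted union-find, with a small usage check:

class DisjointSetSketch:
    # Union-find with union by rank; tracks the weight of the largest merged set,
    # mirroring the structure of the class above with plain names.
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def get_parent(self, node):
        while self.parents[node] != node:
            node = self.parents[node]
        return node

    def merge(self, src, dst):
        src_root, dst_root = self.get_parent(src), self.get_parent(dst)
        if src_root == dst_root:
            return False
        if self.ranks[src_root] > self.ranks[dst_root]:
            src_root, dst_root = dst_root, src_root  # attach smaller rank under larger
        self.parents[src_root] = dst_root
        self.set_counts[dst_root] += self.set_counts[src_root]
        self.set_counts[src_root] = 0
        if self.ranks[src_root] == self.ranks[dst_root]:
            self.ranks[dst_root] += 1
        self.max_set = max(self.max_set, self.set_counts[dst_root])
        return True

ds = DisjointSetSketch([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
print(ds.max_set)  # 3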
| 193 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Dict ) -> Dict:
__UpperCamelCase : List[Any] = tempfile.mkdtemp()
__UpperCamelCase : Tuple = BlipImageProcessor()
__UpperCamelCase : Dict = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__UpperCamelCase : Dict = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
__UpperCamelCase : Optional[Any] = InstructBlipProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self :Tuple , **a :Union[str, Any] ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def _lowerCamelCase ( self :Any , **a :Optional[Any] ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def _lowerCamelCase ( self :Dict , **a :List[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).qformer_tokenizer
def _lowerCamelCase ( self :Optional[int] ) -> int:
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self :List[str] ) -> Dict:
__UpperCamelCase : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
__UpperCamelCase : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self :List[Any] ) -> str:
__UpperCamelCase : int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__UpperCamelCase : List[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
__UpperCamelCase : Tuple = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
__UpperCamelCase : Optional[int] = self.get_image_processor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : Optional[Any] = self.get_qformer_tokenizer()
__UpperCamelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : List[Any] = self.prepare_image_inputs()
__UpperCamelCase : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="np" )
__UpperCamelCase : Any = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self :Optional[Any] ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] = self.get_image_processor()
__UpperCamelCase : List[Any] = self.get_tokenizer()
__UpperCamelCase : int = self.get_qformer_tokenizer()
__UpperCamelCase : str = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : List[str] = """lower newer"""
__UpperCamelCase : List[Any] = processor(text=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : Dict = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : Optional[int] = qformer_tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def _lowerCamelCase ( self :List[str] ) -> List[str]:
__UpperCamelCase : Tuple = self.get_image_processor()
__UpperCamelCase : Union[str, Any] = self.get_tokenizer()
__UpperCamelCase : List[str] = self.get_qformer_tokenizer()
__UpperCamelCase : Dict = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : List[Any] = """lower newer"""
__UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
__UpperCamelCase : Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _lowerCamelCase ( self :Optional[Any] ) -> List[Any]:
__UpperCamelCase : Tuple = self.get_image_processor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : str = self.get_qformer_tokenizer()
__UpperCamelCase : str = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : str = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Tuple = self.get_qformer_tokenizer()
__UpperCamelCase : Optional[int] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase : int = """lower newer"""
__UpperCamelCase : List[str] = self.prepare_image_inputs()
__UpperCamelCase : Tuple = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
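The tests above exercise a composite processor that routes text to two tokenizers and images to an image processor, merging everything into one encoding dict with qformer keys prefixed. A toy sketch of that routing pattern, with stub components standing in for the real ones:

class ToyMultimodalProcessor:
    # Route text to a main tokenizer and a qformer tokenizer, images to an
    # image processor; merge all outputs into one dict.
    def __init__(self, tokenizer, qformer_tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.qformer_tokenizer = qformer_tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify at least text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text))
            encoding.update(
                {"qformer_" + k: v for k, v in self.qformer_tokenizer(text).items()}
            )
        if images is not None:
            encoding.update(self.image_processor(images))
        return encoding

# Lambdas stand in for the real tokenizer/image-processor components:
proc = ToyMultimodalProcessor(
    tokenizer=lambda t: {"input_ids": [1, 2], "attention_mask": [1, 1]},
    qformer_tokenizer=lambda t: {"input_ids": [3, 4], "attention_mask": [1, 1]},
    image_processor=lambda i: {"pixel_values": i},
)
print(sorted(proc(text="lower newer", images=[[0.0]]).keys()))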
| 366 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(__lowercase)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :Any , **a :Union[str, Any] ) -> Union[str, Any]:
super().__init__(**a )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Any , a :Union[str, List[str], "Image", List["Image"]] , **a :Tuple ) -> List[str]:
return super().__call__(a , **a )
def _lowerCamelCase ( self :List[Any] , **a :List[str] ) -> List[Any]:
__UpperCamelCase : List[Any] = {}
if "candidate_labels" in kwargs:
__UpperCamelCase : Optional[int] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
__UpperCamelCase : List[str] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _lowerCamelCase ( self :List[str] , a :Optional[int] , a :List[str]=None , a :Dict="This is a photo of {}." ) -> Any:
__UpperCamelCase : Dict = load_image(a )
__UpperCamelCase : Any = self.image_processor(images=[image] , return_tensors=self.framework )
__UpperCamelCase : str = candidate_labels
__UpperCamelCase : List[Any] = [hypothesis_template.format(a ) for x in candidate_labels]
__UpperCamelCase : List[Any] = self.tokenizer(a , return_tensors=self.framework , padding=a )
__UpperCamelCase : Any = [text_inputs]
return inputs
def _lowerCamelCase ( self :Union[str, Any] , a :Optional[Any] ) -> List[Any]:
__UpperCamelCase : List[str] = model_inputs.pop("candidate_labels" )
__UpperCamelCase : Dict = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , a ):
__UpperCamelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
__UpperCamelCase : int = text_inputs[0][0]
__UpperCamelCase : str = self.model(**a , **a )
__UpperCamelCase : List[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self :List[Any] , a :List[Any] ) -> Tuple:
__UpperCamelCase : Any = model_outputs.pop("candidate_labels" )
__UpperCamelCase : Optional[Any] = model_outputs["logits"][0]
if self.framework == "pt":
__UpperCamelCase : int = logits.softmax(dim=-1 ).squeeze(-1 )
__UpperCamelCase : List[str] = probs.tolist()
if not isinstance(a , a ):
__UpperCamelCase : List[Any] = [scores]
elif self.framework == "tf":
__UpperCamelCase : Optional[int] = stable_softmax(a , axis=-1 )
__UpperCamelCase : Dict = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__UpperCamelCase : Tuple = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
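The postprocessing above reduces to a softmax over the per-label logits followed by a best-first sort; a framework-free sketch of that math:

import math

def zero_shot_scores(logits, labels):
    # Numerically stable softmax over candidate-label logits, sorted best-first.
    exps = [math.exp(l - max(logits)) for l in logits]
    probs = [e / sum(exps) for e in exps]
    return sorted(
        ({"score": p, "label": lab} for p, lab in zip(probs, labels)),
        key=lambda d: -d["score"],
    )

print(zero_shot_scores([2.0, 0.5, 0.1], ["cat", "dog", "car"]))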
| 151 | 0 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = RobertaPreLayerNormConfig.from_pretrained(
_lowerCAmelCase , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
snake_case_ = torch.load(hf_hub_download(repo_id=_lowerCAmelCase , filename='pytorch_model.bin' ) )
snake_case_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
snake_case_ = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
snake_case_ = tensor_value
snake_case_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_lowerCAmelCase , config=_lowerCAmelCase , state_dict=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
# convert tokenizer
snake_case_ = AutoTokenizer.from_pretrained(_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
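The core of the conversion above is a prefix rewrite on state-dict keys plus dropping LayerNorm weights the model never uses; the same idea on a plain dict (tensor values are placeholders):

original = {
    "roberta.embeddings.word_embeddings.weight": "tensor-0",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "unused",
    "lm_head.bias": "tensor-1",
}
converted = {}
for key, value in original.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta."):]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # the original checkpoint carries weights the model never uses
    converted[key] = value
print(sorted(converted))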
| 69 |
"""simple docstring"""
def A_ ( length : int = 50 ):
    """simple docstring"""
    ways_number = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
    print(f'{A_() = }')
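The function above counts Project Euler 114 tilings (red blocks of length at least 3, separated by at least one empty cell); the problem statement quotes 17 ways for a row of length 7, which makes a handy sanity check:

assert A_(7) == 17  # value quoted in the Project Euler 114 statement
print(A_(7))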
| 320 | 0 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
__UpperCAmelCase : Dict = True
# 0 and 1 are none primes.
if number <= 1:
__UpperCAmelCase : Dict = False
for divisor in range(2 , int(round(sqrt(_UpperCamelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__UpperCAmelCase : Optional[int] = False
break
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'status' must been from type bool"
return status
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__UpperCAmelCase : Union[str, Any] = list(range(2 , n + 1 ) )
__UpperCAmelCase : List[str] = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_UpperCamelCase ) ):
for j in range(i + 1 , len(_UpperCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__UpperCAmelCase : Union[str, Any] = 0
# filters actual prime numbers.
__UpperCAmelCase : Any = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase ( _UpperCamelCase : int ) -> str:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2"
__UpperCAmelCase : List[str] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_UpperCamelCase ):
ans.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase ( _UpperCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0"
__UpperCAmelCase : str = [] # this list will be returns of the function.
# potential prime number factors.
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : List[str] = number
if number == 0 or number == 1:
ans.append(_UpperCamelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_UpperCamelCase ):
while quotient != 1:
if is_prime(_UpperCamelCase ) and (quotient % factor == 0):
ans.append(_UpperCamelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list"
return ans
def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCAmelCase : Optional[Any] = 0
# prime factorization of 'number'
__UpperCAmelCase : Tuple = prime_factorization(_UpperCamelCase )
__UpperCAmelCase : Optional[Any] = max(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__UpperCAmelCase : Optional[int] = 0
# prime factorization of 'number'
__UpperCAmelCase : Tuple = prime_factorization(_UpperCamelCase )
__UpperCAmelCase : Tuple = min(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int"
return ans
def lowerCamelCase ( _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
return number % 2 == 0
def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _UpperCamelCase ), "compare bust been from type bool"
return number % 2 != 0
def lowerCamelCase ( _UpperCamelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase ) and (number > 2) and is_even(_UpperCamelCase )
), "'number' must been an int, even and > 2"
__UpperCAmelCase : str = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__UpperCAmelCase : Tuple = get_prime_numbers(_UpperCamelCase )
__UpperCAmelCase : Tuple = len(_UpperCamelCase )
# run variable for while-loops.
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Tuple = None
# exit variable. for break up the loops
__UpperCAmelCase : int = True
while i < len_pn and loop:
__UpperCAmelCase : List[str] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__UpperCAmelCase : List[Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (len(_UpperCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__UpperCAmelCase : Optional[Any] = 0
while numbera != 0:
__UpperCAmelCase : Union[str, Any] = numbera % numbera
__UpperCAmelCase : Any = numbera
__UpperCAmelCase : Tuple = rest
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__UpperCAmelCase : Optional[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__UpperCAmelCase : Dict = prime_factorization(_UpperCamelCase )
__UpperCAmelCase : List[Any] = prime_factorization(_UpperCamelCase )
elif numbera == 1 or numbera == 1:
__UpperCAmelCase : str = []
__UpperCAmelCase : Any = []
__UpperCAmelCase : Tuple = max(_UpperCamelCase , _UpperCamelCase )
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__UpperCAmelCase : Union[str, Any] = prime_fac_a.count(_UpperCamelCase )
__UpperCAmelCase : List[str] = prime_fac_a.count(_UpperCamelCase )
for _ in range(max(_UpperCamelCase , _UpperCamelCase ) ):
ans *= n
else:
__UpperCAmelCase : List[str] = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__UpperCAmelCase : Tuple = prime_fac_a.count(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
ans *= n
done.append(_UpperCamelCase )
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCamelCase ( _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'number' must been a positive int"
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCamelCase ):
ans += 1
# precondition
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and is_prime(
_UpperCamelCase ), "'ans' must been a prime number and from type int"
return ans
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
assert (
is_prime(_UpperCamelCase ) and is_prime(_UpperCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__UpperCAmelCase : int = p_number_a + 1 # jump to the next number
__UpperCAmelCase : Any = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
while number < p_number_a:
ans.append(_UpperCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCamelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and ans[0] != p_number_a
and ans[len(_UpperCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1"
__UpperCAmelCase : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_UpperCamelCase )
# precondition
assert ans[0] == 1 and ans[len(_UpperCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
__UpperCAmelCase : List[str] = get_divisors(_UpperCamelCase )
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (divisors[0] == 1)
and (divisors[len(_UpperCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) -> str:
'''simple docstring'''
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and isinstance(_UpperCamelCase , _UpperCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__UpperCAmelCase : Any = gcd(abs(_UpperCamelCase ) , abs(_UpperCamelCase ) )
# precondition
assert (
isinstance(_UpperCamelCase , _UpperCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
__UpperCAmelCase : Union[str, Any] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : List[Any] = 1 # this will be return
for _ in range(n - 1 ):
__UpperCAmelCase : Any = ans
ans += fiba
__UpperCAmelCase : List[Any] = tmp
return ans
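Every function in the module above is obfuscated to the same name lowerCamelCase, so as a file each definition would shadow the previous one. A readable sketch of the two workhorse helpers, trial division up to the integer square root and Euclid's gcd:

from math import isqrt

def is_prime_sketch(n: int) -> bool:
    # Trial division: a composite n has a divisor no larger than isqrt(n).
    if n <= 1:
        return False
    return all(n % d != 0 for d in range(2, isqrt(n) + 1))

def gcd_sketch(a: int, b: int) -> int:
    # Euclid's algorithm: replace (a, b) with (b, a mod b) until b is 0.
    while b:
        a, b = b, a % b
    return a

print([p for p in range(2, 20) if is_prime_sketch(p)])  # [2, 3, 5, 7, 11, 13, 17, 19]
print(gcd_sketch(48, 36))  # 12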
| 368 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ):
'''simple docstring'''
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
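The shim above is the standard deprecated-alias pattern: a subclass whose only job is to warn before delegating to the replacement class. A generic sketch of the pattern (class names here are hypothetical stand-ins):

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    # Deprecated alias: warn once at construction, then behave like the new class.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed in a future version."
            " Please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=336)  # emits a FutureWarning, then works as before
print(proc.size)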
| 320 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: Optional[int] = logging.get_logger(__name__)
A__: Tuple = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """owlvit_text_model"""
def __init__( self: Tuple , __lowerCamelCase: List[Any]=4_9408 , __lowerCamelCase: Tuple=512 , __lowerCamelCase: Tuple=2048 , __lowerCamelCase: Optional[int]=12 , __lowerCamelCase: Optional[Any]=8 , __lowerCamelCase: Dict=16 , __lowerCamelCase: List[Any]="quick_gelu" , __lowerCamelCase: Any=1e-5 , __lowerCamelCase: Any=0.0 , __lowerCamelCase: Any=0.02 , __lowerCamelCase: List[str]=1.0 , __lowerCamelCase: Any=0 , __lowerCamelCase: str=4_9406 , __lowerCamelCase: Optional[int]=4_9407 , **__lowerCamelCase: Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: Union[str, Any] = vocab_size
UpperCamelCase__: List[str] = hidden_size
UpperCamelCase__: str = intermediate_size
UpperCamelCase__: Dict = num_hidden_layers
UpperCamelCase__: Optional[Any] = num_attention_heads
UpperCamelCase__: Optional[int] = max_position_embeddings
UpperCamelCase__: Union[str, Any] = hidden_act
UpperCamelCase__: Optional[int] = layer_norm_eps
UpperCamelCase__: Union[str, Any] = attention_dropout
UpperCamelCase__: Any = initializer_range
UpperCamelCase__: Union[str, Any] = initializer_factor
@classmethod
def UpperCAmelCase_ ( cls: Optional[Any] , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Dict ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase__ , UpperCamelCase__: List[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCamelCase__: int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """owlvit_vision_model"""
def __init__( self: Any , __lowerCamelCase: List[Any]=768 , __lowerCamelCase: Tuple=3072 , __lowerCamelCase: str=12 , __lowerCamelCase: Union[str, Any]=12 , __lowerCamelCase: Dict=3 , __lowerCamelCase: Optional[Any]=768 , __lowerCamelCase: Union[str, Any]=32 , __lowerCamelCase: Tuple="quick_gelu" , __lowerCamelCase: Dict=1e-5 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: Optional[int]=1.0 , **__lowerCamelCase: str , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
UpperCamelCase__: Tuple = hidden_size
UpperCamelCase__: int = intermediate_size
UpperCamelCase__: Optional[Any] = num_hidden_layers
UpperCamelCase__: Optional[int] = num_attention_heads
UpperCamelCase__: Any = num_channels
UpperCamelCase__: Dict = image_size
UpperCamelCase__: Optional[Any] = patch_size
UpperCamelCase__: str = hidden_act
UpperCamelCase__: List[str] = layer_norm_eps
UpperCamelCase__: List[Any] = attention_dropout
UpperCamelCase__: List[str] = initializer_range
UpperCamelCase__: Union[str, Any] = initializer_factor
@classmethod
def UpperCAmelCase_ ( cls: Dict , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase__ , UpperCamelCase__: Any = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
UpperCamelCase__: Optional[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """owlvit"""
UpperCamelCase__ = True
def __init__( self: Optional[Any] , __lowerCamelCase: Dict=None , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: List[Any]=512 , __lowerCamelCase: int=2.6_592 , __lowerCamelCase: Dict=True , **__lowerCamelCase: Any , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
if text_config is None:
UpperCamelCase__: List[str] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
UpperCamelCase__: List[str] = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
UpperCamelCase__: Optional[Any] = OwlViTTextConfig(**__lowerCamelCase )
UpperCamelCase__: List[str] = OwlViTVisionConfig(**__lowerCamelCase )
UpperCamelCase__: Optional[int] = projection_dim
UpperCamelCase__: str = logit_scale_init_value
UpperCamelCase__: int = return_dict
UpperCamelCase__: Union[str, Any] = 1.0
@classmethod
def UpperCAmelCase_ ( cls: Union[str, Any] , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: List[str] ):
'''simple docstring'''
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase__ , UpperCamelCase__: int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCAmelCase_ ( cls: str , __lowerCamelCase: Dict , __lowerCamelCase: Dict , **__lowerCamelCase: Dict ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = {}
UpperCamelCase__: Optional[int] = text_config
UpperCamelCase__: Dict = vision_config
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = copy.deepcopy(self.__dict__ )
UpperCamelCase__: List[Any] = self.text_config.to_dict()
UpperCamelCase__: Any = self.vision_config.to_dict()
UpperCamelCase__: Any = self.__class__.model_type
return output
class _a ( UpperCamelCase__):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return 1e-4
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: "ProcessorMixin" , __lowerCamelCase: int = -1 , __lowerCamelCase: int = -1 , __lowerCamelCase: Optional["TensorType"] = None , ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , framework=__lowerCamelCase )
UpperCamelCase__: List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__lowerCamelCase , framework=__lowerCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return 14
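The composite config above nests a text config and a vision config and flattens them back to plain dicts in to_dict(); a minimal sketch of that pattern with dataclasses (all names hypothetical):

import copy
from dataclasses import dataclass, asdict

@dataclass
class TextCfg:
    hidden_size: int = 512

@dataclass
class VisionCfg:
    hidden_size: int = 768

class CompositeCfg:
    model_type = "owlvit-like"

    def __init__(self, text_config=None, vision_config=None, projection_dim=512):
        self.text_config = TextCfg(**(text_config or {}))
        self.vision_config = VisionCfg(**(vision_config or {}))
        self.projection_dim = projection_dim

    def to_dict(self):
        # Deep-copy the attributes, then flatten the nested configs to dicts.
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = asdict(self.text_config)
        output["vision_config"] = asdict(self.vision_config)
        output["model_type"] = self.model_type
        return output

print(CompositeCfg(text_config={"hidden_size": 256}).to_dict())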
| 149 |
from collections import deque
class _a :
"""simple docstring"""
def __init__( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = process_name # process name
UpperCamelCase__: Optional[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCamelCase__: Tuple = arrival_time
UpperCamelCase__: str = burst_time # remaining burst time
UpperCamelCase__: int = 0 # total time of the process wait in ready queue
UpperCamelCase__: List[Any] = 0 # time from arrival time to completion time
class _a :
"""simple docstring"""
def __init__( self: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: list[int] , __lowerCamelCase: deque[Process] , __lowerCamelCase: int , ):
'''simple docstring'''
UpperCamelCase__: List[str] = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCamelCase__: Optional[Any] = time_slices
# unfinished process is in this ready_queue
UpperCamelCase__: Optional[int] = queue
# current time
UpperCamelCase__: Any = current_time
# finished process is in this sequence queue
UpperCamelCase__: deque[Process] = deque()
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Dict = []
for i in range(len(__lowerCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: int = []
for i in range(len(__lowerCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: list[Process] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = []
for i in range(len(__lowerCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCAmelCase_ ( self: int , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def UpperCAmelCase_ ( self: Optional[int] , __lowerCamelCase: Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: deque[Process] ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of finished process
while len(__lowerCamelCase ) != 0:
UpperCamelCase__: int = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__lowerCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase__: Optional[int] = 0
# set the process's turnaround time because it is finished
UpperCamelCase__: Optional[Any] = self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase__: List[Any] = self.current_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: deque[Process] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__lowerCamelCase ) ):
UpperCamelCase__: str = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__lowerCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase__: Optional[int] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__lowerCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase__: Optional[int] = 0
# set the finish time
UpperCamelCase__: Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase__: Dict = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__lowerCamelCase )
self.finish_queue.extend(__lowerCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
UpperCamelCase__ , UpperCamelCase__: Dict = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A__: Any = Process('''P1''', 0, 53)
A__: Tuple = Process('''P2''', 0, 17)
A__: Tuple = Process('''P3''', 0, 68)
A__: Tuple = Process('''P4''', 0, 24)
A__: Any = 3
A__: str = [17, 25]
A__: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A__: str = Process('''P1''', 0, 53)
A__: Union[str, Any] = Process('''P2''', 0, 17)
A__: Optional[Any] = Process('''P3''', 0, 68)
A__: str = Process('''P4''', 0, 24)
A__: Any = 3
A__: Optional[Any] = [17, 25]
A__: Any = deque([Pa, Pa, Pa, Pa])
A__: Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
A__: str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
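The time-slice mechanics of the round-robin stage above, reduced to (name, remaining_burst) pairs; unfinished jobs go to the back of the queue, which is the demotion hook the multilevel feedback queue builds on:

from collections import deque

def round_robin_sketch(jobs, time_slice):
    # jobs: deque of [name, remaining_burst]; returns (name, completion_time) pairs.
    finished = []
    clock = 0
    while jobs:
        name, burst = jobs.popleft()
        if burst > time_slice:
            clock += time_slice
            jobs.append([name, burst - time_slice])  # back of the queue
        else:
            clock += burst
            finished.append((name, clock))
    return finished

print(round_robin_sketch(deque([["P1", 53], ["P2", 17], ["P3", 68], ["P4", 24]]), 17))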
| 149 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
UpperCAmelCase__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : List[Any] = torch.permute(UpperCamelCase__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase__ ):
# linear layer
UpperCAmelCase__ : Any = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ : Optional[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if "metadata" in layer:
UpperCAmelCase__ : int = layer.split("""metadata""" )
UpperCAmelCase__ : Any = """""".join(split_layer[0] )[:-1]
UpperCAmelCase__ : List[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
UpperCAmelCase__ : Union[str, Any] = layer.split("""kvstore""" )
UpperCAmelCase__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
UpperCAmelCase__ : Dict = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
UpperCAmelCase__ : List[Any] = layer.split("""/""" )
UpperCAmelCase__ : Optional[int] = """/""".join(split_layer[:-1] )
UpperCAmelCase__ : List[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
UpperCAmelCase__ : Tuple = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
UpperCAmelCase__ : int = """file"""
else:
UpperCAmelCase__ : List[str] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Any = rename_keys(UpperCamelCase__ )
UpperCAmelCase__ : str = {}
for k, v in current_block.items():
UpperCAmelCase__ : Optional[int] = v
UpperCAmelCase__ : int = new_current_block
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = WEIGHTS_NAME ):
UpperCAmelCase__ : List[Any] = convert_file_size_to_int(UpperCamelCase__ )
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Any = 0
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
UpperCAmelCase__ : str = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
UpperCAmelCase__ : Union[str, Any] = flatten_dict(UpperCamelCase__ , sep="""/""" )
UpperCAmelCase__ : List[Any] = {}
for layer in checkpoint_info.keys():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = get_key_and_tensorstore_dict(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if curr_real_layer_name in all_layers:
UpperCAmelCase__ : Union[str, Any] = content
else:
UpperCAmelCase__ : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCAmelCase__ : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCAmelCase__ : Optional[int] = torch.tensor(UpperCamelCase__ )
UpperCAmelCase__ : Any = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCAmelCase__ , UpperCAmelCase__ : int = rename_base_flax_keys(tuple(key.split("""/""" ) ) , UpperCamelCase__ )
UpperCAmelCase__ : Tuple = """/""".join(UpperCamelCase__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCAmelCase__ : Optional[int] = os.path.join(
UpperCamelCase__ , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : str = raw_weights.to(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCAmelCase__ : Optional[int] = os.path.join(UpperCamelCase__ , weights_name.replace(""".bin""" , f'''-{len(UpperCamelCase__ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(UpperCamelCase__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : str = {}
for idx, shard in enumerate(UpperCamelCase__ ):
UpperCAmelCase__ : Optional[Any] = weights_name.replace(
""".bin""" , f'''-{idx+1:05d}-of-{len(UpperCamelCase__ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
UpperCAmelCase__ : int = os.path.join(UpperCamelCase__ , weights_name.replace(""".bin""" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCAmelCase__ : List[Any] = shard
for key in shard:
UpperCAmelCase__ : str = shard_file
# Add the metadata
UpperCAmelCase__ : Union[str, Any] = {"""total_size""": total_size}
UpperCAmelCase__ : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase__ : Union[str, Any] = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n"""
f.write(UpperCamelCase__ )
return metadata, index
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A =parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _UpperCamelCase ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
UpperCAmelCase__ : Dict = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
UpperCAmelCase__ : Optional[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
UpperCAmelCase__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
UpperCAmelCase__ : Any = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
UpperCAmelCase__ : Optional[int] = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCAmelCase__ : Dict = model.generate(UpperCamelCase__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
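The sharding logic above accumulates tensors until a byte budget would be exceeded, then closes the shard and finally writes an index mapping each weight to its shard file; the same accounting sketched on plain byte counts:

def shard_state_dict_sketch(sizes, max_shard_bytes):
    # sizes: {weight_name: byte_size}; returns (shards, weight_map).
    shards, current, current_bytes = [], {}, 0
    for name, nbytes in sizes.items():
        if current and current_bytes + nbytes > max_shard_bytes:
            shards.append(current)          # close the full shard first
            current, current_bytes = {}, 0
        current[name] = nbytes
        current_bytes += nbytes
    shards.append(current)                  # last (possibly partial) shard
    weight_map = {
        name: f"pytorch_model-{idx + 1:05d}-of-{len(shards):05d}.bin"
        for idx, shard in enumerate(shards)
        for name in shard
    }
    return shards, weight_map

sizes = {"a": 40, "b": 50, "c": 30, "d": 90}
print(shard_state_dict_sketch(sizes, max_shard_bytes=100)[1])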
| 283 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A =logging.getLogger(__name__)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , ):
UpperCAmelCase__ : str = bnb_quantization_config.load_in_abit
UpperCAmelCase__ : str = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
UpperCAmelCase__ : List[Any] = []
# custom device map
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(device_map.keys() ) > 1:
UpperCAmelCase__ : Dict = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase__ : Any = get_keys_to_not_convert(UpperCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCamelCase__ )
UpperCAmelCase__ : Tuple = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCamelCase__ )
# compatibility with peft
UpperCAmelCase__ : Optional[int] = load_in_abit
UpperCAmelCase__ : List[Any] = load_in_abit
UpperCAmelCase__ : Dict = get_parameter_device(UpperCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
UpperCAmelCase__ : Optional[int] = replace_with_bnb_layers(UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
# convert param to the right dtype
UpperCAmelCase__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase__ : List[str] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
UpperCAmelCase__ : int = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase__ ):
param.to(UpperCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
UpperCAmelCase__ : Tuple = replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
UpperCAmelCase__ : Any = get_quantized_model_device_map(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_memory=UpperCamelCase__ , no_split_module_classes=UpperCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Any = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase__ , offload_state_dict=UpperCamelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCamelCase__ , device_map=UpperCamelCase__ , offload_dir=UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"""""": torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs["""special_dtypes"""] = special_dtypes
        kwargs["""no_split_module_classes"""] = no_split_module_classes
        kwargs["""dtype"""] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == """balanced_low_0""") , max_memory=max_memory , **kwargs , )
        kwargs["""max_memory"""] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
# check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        device_map_without_some_modules = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
                logger.info(
                    """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
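# Note: besides the "auto"/"balanced"/"balanced_low_0"/"sequential" strings
# handled above, `device_map` may also be an explicit dict. A hand-written map
# that keeps the quantized blocks on GPU 0 and offloads the head could look
# like the sketch below (the module names are hypothetical and depend on the
# actual model architecture):
#
# device_map = {"transformer.wte": 0, "transformer.h": 0, "lm_head": "cpu"}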
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = """.""".join(current_key_name )
            proceed = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
                    proceed = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=False , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't both be False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_abit_bnb_layers( model ):
    # Check whether the model contains any bitsandbytes 4-bit linear layers.
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def get_parameter_device( parameter ):
    # Return the device that the module's first parameter lives on.
    return next(parameter.parameters() ).device
def quantize_and_offload_abit( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics ):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(""".""" )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , """SCB""" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fpaa_statistics , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , """meta""" , dtype=new_dtype , value=torch.empty(*param.size() ) )
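# Note on the branches above: when `fpaa_statistics` is None the weight is first
# materialized on the device so bitsandbytes can quantize it, and the quantized
# weight plus its `SCB` scaling statistics are then written to the offload
# folder; otherwise the already-quantized weight and the provided statistics are
# offloaded directly. In both cases the parameter is finally replaced by an
# empty tensor on the `meta` device so it no longer occupies memory.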
| 283 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__( self ,parent ,batch_size=1_3 ,image_size=3_2 ,patch_size=2 ,num_channels=3 ,embed_dim=1_6 ,depths=[1, 2, 1] ,num_heads=[2, 2, 4] ,window_size=2 ,mlp_ratio=2.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,patch_norm=True ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,is_training=True ,scope=None ,use_labels=True ,type_sequence_label_size=1_0 ,encoder_stride=8 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwinvaConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim))
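    # Worked example with this tester's defaults: image_size=32 and patch_size=2
    # give (32 // 2) ** 2 = 256 patches; len(depths) == 3 means two merging
    # stages, so the final sequence length is 256 // 4 ** 2 = 16 and the final
    # hidden size is embed_dim * 2 ** 2 = 64.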
    def create_and_check_for_masked_image_modeling( self ,config ,pixel_values ,labels ):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values ,labels=labels)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self ,config_class=SwinvaConfig ,embed_dim=3_7)
    def test_config( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
    def test_multi_gpu_data_parallel_forward( self ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear))
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names)
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions) ,expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions) ,expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
            if hasattr(self.model_tester ,'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states ,len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions) ,expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
    def check_hidden_states_output( self ,inputs_dict ,config ,model_class ,image_size ):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict ,model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) ,expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states) ,expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width).permute(0 ,2 ,1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,image_size)
    def test_hidden_states_output_with_padding( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict ,config ,model_class ,(padded_height, padded_width))
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self ):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image ,return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape ,expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4))
| 73 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'" )
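# A minimal sketch of how this registry is typically exercised (illustrative
# only; it relies solely on the functions defined in this module and assumes
# numpy is installed):
#
# formatter = get_formatter("np")   # alias resolved to "numpy"
# assert isinstance(formatter, NumpyFormatter)
# formatter = get_formatter(None)   # default: plain Python objects
# assert isinstance(formatter, PythonFormatter)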
| 11 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """align_text_model"""

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """align_vision_model"""

    def __init__( self , num_channels: int = 3 , image_size: int = 6_00 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels: List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 25_60 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , drop_connect_rate: float = 0.2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = """align"""
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , projection_dim=6_40 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
@classmethod
    def from_text_vision_configs( cls , text_config: AlignTextConfig , vision_config: AlignVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
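# A minimal construction sketch (illustrative only, using just the classes
# defined above): build an AlignConfig from defaults, or compose it from
# explicitly constructed text and vision sub-configs.
#
# config = AlignConfig()  # default text + vision configs
# text_config = AlignTextConfig(vocab_size=30522)
# vision_config = AlignVisionConfig(image_size=600)
# config = AlignConfig.from_text_vision_configs(text_config, vision_config)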
| 359 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number( number: int ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def chain( number: int ) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number: int = 10000000 ) -> int:
    '''simple docstring'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    # Chains that arrive at 89 were memoized as False above.
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 61 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest( parameterized.TestCase ):
    """simple docstring"""

    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric( self , metric_name ):
        _ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@slow
    def test_load_metric_slow( self , metric_name ):
        _ = '''[...]'''
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@contextmanager
    def patch_intensive_calls( self , metric_name , module_name ):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics( self ):
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('metrics' , metric_name ) , *args , **kwargs )

        with patch('datasets.load_metric' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt(module_name ):
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv' , '' , '' )  # handle pytest cli flags

    class MockedPredictor(Predictor ):
        def predict( self , input_dict ):
            assert len(input_dict['input_ids'] ) == 2
            return np.array([1.03, 1.04] )

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore(module_name ):
    import torch

    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model' ), patch(
        'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet(module_name ):
    def load_from_checkpoint(model_path ):
        class Model:
            def predict( self , data , *args , **kwargs ):
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )

        return Model()

    # mock download_model which is supposed to download a comet model
    with patch('comet.download_model' ) as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load the downloaded model
        with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics' , 'seqeval' ) )
    wrong_scheme = 'ERROR'
    error_message = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 299 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
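# Worked example: for x = 1/2, y = 1/3, z = 1/6 the raw sum is 36/36, and after
# dividing by gcd(36, 36) == 36 the function returns the reduced pair (1, 1).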
def solution(order: int = 35 ) -> int:
    unique_s: set = set()
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1 ):
        for x_den in range(x_num + 1, order + 1 ):
            for y_num in range(1, order + 1 ):
                for y_den in range(y_num + 1, order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num, z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num, den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img )
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            """<PIL.Image.Image image mode=RGB size=100x100 at""" )


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def test_canny() -> None:
    canny_img = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()


def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uinta )
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray , 3 ).any()


def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg" ) -> None:
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg" , ) -> None:
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = """digital_image_processing/image_data/lena.jpg"""
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
    assert lbp_image.any()
| 129 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception ):
    pass


class Node:
    def __init__( self , data: Any ) -> None:
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
@property
    def has_loop( self ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
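# An alternative loop check that avoids the O(n^2) `visited` list above is
# Floyd's tortoise-and-hare algorithm. The sketch below is illustrative, not
# part of the class; it only assumes nodes expose the same `next_node` field:
#
# def has_loop_floyd(head: Node | None) -> bool:
#     slow = fast = head
#     while fast is not None and fast.next_node is not None:
#         slow = slow.next_node            # advance one step
#         fast = fast.next_node.next_node  # advance two steps
#         if slow is fast:                 # pointers can only meet inside a cycle
#             return True
#     return False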
| 129 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT_HASH = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class GetFromCacheTests( unittest.TestCase ):
    def test_cached_file( self ):
        """simple docstring"""
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , "refs" , "main" ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , "snapshots" , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision="9b8c223" )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , "snapshots" , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , "is not a valid model identifier" ):
            _ = cached_file("tiny-random-bert" , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , "is not a valid git identifier" ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision="aaaa" )
        with self.assertRaisesRegex(EnvironmentError , "does not appear to have a file named" ):
            _ = cached_file(RANDOM_BERT , "conf" )
    def test_non_existence_is_cached( self ):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError , "does not appear to have a file named" ):
            _ = cached_file(RANDOM_BERT , "conf" )
        with open(os.path.join(CACHE_DIR , "refs" , "main" ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , ".no_exist" , main_commit , "conf" ) ) )
        path = cached_file(RANDOM_BERT , "conf" , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , "conf" , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , "conf" , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_has_file( self ):
        """simple docstring"""
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
        """simple docstring"""
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , "is not a valid model identifier" ):
            get_file_from_repo("bert-base-case" , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , "is not a valid git identifier" ):
            get_file_from_repo("bert-base-cased" , CONFIG_NAME , revision="ahaha" )
        resolved_file = get_file_from_repo("bert-base-cased" , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , "r" ).read() )
        self.assertEqual(config["hidden_size"] , 768 )
    def test_get_file_from_repo_local( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , "a.txt" ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , "b.txt" ) )
| 18 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
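# A quick NumPy-only sketch of the framework-agnostic helpers under test
# (assumes only that `transformers` and `numpy` are installed):
#
#     import numpy as np
#     from transformers.utils import transpose, reshape, squeeze, expand_dims
#
#     x = np.random.randn(1, 3, 4)
#     assert transpose(x, axes=(2, 1, 0)).shape == (4, 3, 1)
#     assert reshape(x, (12,)).shape == (12,)
#     assert squeeze(x).shape == (3, 4)
#     assert expand_dims(x, axis=0).shape == (1, 1, 3, 4)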
| 18 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens, used by the spectrogram diffusion pipeline."""

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for _ in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask into the additive form expected by the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
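# A minimal usage sketch (the hyperparameter values below are illustrative, not
# the pipeline's trained configuration):
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#         num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 1536, (1, 2048))
#     mask = torch.ones(1, 2048, dtype=torch.bool)
#     encodings, mask = encoder(tokens, mask)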
| 151 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
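# A minimal usage sketch (downloads the pretrained sentencepiece model; requires
# `jieba` and network access):
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("清华大学")
#     print(tokenizer.decode(ids))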
| 151 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy the timm weights over positionally; both state dicts are ordered.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
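# Example invocation (assumes a transformers checkout with this script and `timm`
# installed; the script filename is an assumption, and this converts one checkpoint
# without pushing to the Hub):
#
#     python convert_levit_timm_to_pytorch.py \
#         --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ \
#         --no-push_to_hub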
| 65 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
lowerCAmelCase_ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
lowerCAmelCase_ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
lowerCAmelCase_ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
lowerCAmelCase_ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
lowerCAmelCase_ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
lowerCAmelCase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , __lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' ,__snake_case ,__snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.train_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase__ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,split=data_args.eval_split_name ,use_auth_token=True if model_args.use_auth_token else None ,)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path ,return_attention_mask=model_args.attention_mask ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name ,datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(__snake_case ):
lowerCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
lowerCamelCase__ = random_subsample(
audio['''array'''] ,max_length=data_args.max_length_seconds ,sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__snake_case )
lowerCamelCase__ = feature_extractor(__snake_case ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__ = {model_input_name: inputs.get(__snake_case )}
lowerCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__snake_case ):
lowerCamelCase__ = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
lowerCamelCase__ = feature_extractor(__snake_case ,sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__ = {model_input_name: inputs.get(__snake_case )}
lowerCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase__ = raw_datasets['''train'''].features[data_args.label_column_name].names
lowerCamelCase__ , lowerCamelCase__ = {}, {}
for i, label in enumerate(__snake_case ):
lowerCamelCase__ = str(__snake_case )
lowerCamelCase__ = label
# Load the accuracy metric from the datasets package
lowerCamelCase__ = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__snake_case ):
lowerCamelCase__ = np.argmax(eval_pred.predictions ,axis=1 )
return metric.compute(predictions=__snake_case ,references=eval_pred.label_ids )
lowerCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(__snake_case ) ,labelaid=__snake_case ,idalabel=__snake_case ,finetuning_task='''audio-classification''' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
lowerCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=__snake_case ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase__ = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__snake_case ,output_all_columns=__snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase__ = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__snake_case ,output_all_columns=__snake_case )
# Initialize our trainer
lowerCamelCase__ = Trainer(
model=__snake_case ,args=__snake_case ,train_dataset=raw_datasets['''train'''] if training_args.do_train else None ,eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None ,compute_metrics=__snake_case ,tokenizer=__snake_case ,)
# Training
if training_args.do_train:
lowerCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ = last_checkpoint
lowerCamelCase__ = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics('''train''' ,train_result.metrics )
trainer.save_metrics('''train''' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__ = trainer.evaluate()
trainer.log_metrics('''eval''' ,__snake_case )
trainer.save_metrics('''eval''' ,__snake_case )
# Write model card and (optionally) push to hub
lowerCamelCase__ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
if __name__ == "__main__":
main()
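# Example invocation (mirrors the transformers audio-classification example;
# dataset and hyperparameters are illustrative):
#
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb \
#         --dataset_config_name ks \
#         --output_dir wav2vec2-base-ft-keyword-spotting \
#         --do_train --do_eval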
| 209 | 0 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text in `filename` between `start_prompt` and `end_prompt`, plus its line span."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim empty lines on both ends.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of models supporting a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check the model list in a task guide for consistency, rewriting it in place if `overwrite` is set."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
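# Typical usage from the repository root, as noted at the top of this script:
#
#     python utils/check_task_guides.py                      # fail on out-of-date lists
#     python utils/check_task_guides.py --fix_and_overwrite  # rewrite the lists in place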
| 350 |
"""simple docstring"""
_snake_case = 8.31_44_62 # Unit - J mol-1 K-1
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
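# Worked example: 2 mol of an ideal gas at 300 K in 0.05 m^3 exerts
# pressure_of_gas_system(2, 300, 0.05) = 2 * 300 * 8.314462 / 0.05 ≈ 99,773.5 Pa.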
| 324 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase__ = """pt"""
elif is_tf_available():
lowercase__ = """tf"""
else:
lowercase__ = """jax"""
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.perceiver_tokenizer
_lowerCamelCase : Dict = 'Unicode €.'
_lowerCamelCase : int = tokenizer(lowercase )
_lowerCamelCase : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_lowerCamelCase : Optional[int] = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]Unicode €.[SEP]' )
_lowerCamelCase : Union[str, Any] = tokenizer('e è é ê ë' )
_lowerCamelCase : Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , lowercase )
# decoding
_lowerCamelCase : int = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.perceiver_tokenizer
_lowerCamelCase : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowerCamelCase : List[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
_lowerCamelCase : Dict = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
_lowerCamelCase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowerCamelCase : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.perceiver_tokenizer
_lowerCamelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCamelCase : List[str] = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowercase )
self.assertIn('attention_mask' , lowercase )
self.assertNotIn('decoder_input_ids' , lowercase )
self.assertNotIn('decoder_attention_mask' , lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.perceiver_tokenizer
_lowerCamelCase : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
_lowerCamelCase : Optional[int] = tokenizer(
text_target=lowercase , max_length=32 , padding='max_length' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def A_ ( self ):
# safety check on max_len default value so we are sure the test works
_lowerCamelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCamelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCamelCase : List[str] = ' He is very happy, UNwant\u00E9d,running'
_lowerCamelCase : List[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_lowerCamelCase : Any = tokenizer.__class__.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
_lowerCamelCase : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCamelCase : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowerCamelCase : Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCamelCase : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
_lowerCamelCase : str = tokenizer.__class__.from_pretrained(lowercase )
_lowerCamelCase : Any = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCamelCase : List[Any] = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase : Tuple = json.load(lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase : List[str] = json.load(lowercase )
_lowerCamelCase : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowerCamelCase : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowerCamelCase : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowercase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCamelCase : Optional[int] = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCamelCase : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowercase )]
_lowerCamelCase : Any = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
_lowerCamelCase : List[Any] = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : Dict = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
_lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
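# A minimal usage sketch of the byte-level tokenizer under test (network access
# is needed for the pretrained checkpoint):
#
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     enc = tokenizer("Unicode €.")
#     print(enc["input_ids"])  # utf-8 byte ids framed by [CLS] ... [SEP]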
| 96 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
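# A minimal usage sketch (the default CLIP vision config values are illustrative):
#
#     from transformers import CLIPVisionConfig
#
#     config = CLIPVisionConfig()
#     encoder = PaintByExampleImageEncoder(config)
#     pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#     latents = encoder(pixel_values)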
| 96 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = 'time_series_transformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Union[str, bool] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
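# A minimal usage sketch (a 24-step-ahead forecaster over univariate series;
# values are illustrative):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     print(config.context_length)  # defaults to prediction_length -> 24
#     print(config.hidden_size)     # mapped to d_model via attribute_map -> 64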
| 361 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
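
    # Example invocation (the script name here is illustrative; the token needs
    # the `actions:read` scope):
    #   python check_offline_runners.py --target_runners runner-a,runner-b --token "$GH_TOKEN"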
| 31 | 0 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
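
    # The saved pipeline can be reloaded for sampling afterwards (a sketch,
    # not verified against a specific diffusers version):
    #   pipeline = LDMPipeline.from_pretrained(args.output_path)
    #   images = pipeline(batch_size=1, num_inference_steps=50).images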
| 295 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum contiguous subarray sum, using Kadane's algorithm."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
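
    # Kadane's scan is O(n) time and O(1) space. With allow_empty_subarrays=True
    # the empty subarray (sum 0) is admissible, which matters when every entry
    # is negative:
    print(f"{max_subarray_sum([-2, -3, -1], allow_empty_subarrays=True) = }")  # 0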
| 295 | 1 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # the value-function checkpoint is already a state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
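
    # The exported weights can be rebuilt from the saved config (a sketch):
    #   with open("hub/hopper-medium-v2/unet/hor32/config.json") as f:
    #       net = UNet1DModel(**json.load(f))
    #   net.load_state_dict(torch.load("hub/hopper-medium-v2/unet/hor32/diffusion_pytorch_model.bin"))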
| 306 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
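

# Illustrative usage (a sketch; the checkpoint name below is an assumption,
# not verified here):
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)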
| 306 | 1 |
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
    print("Prime number:", num)
    print("is_prime_low_num:", is_prime_low_num(num))
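
    # rabin_miller is probabilistic: a composite survives one round with
    # probability at most 1/4, so the 5 rounds used above give a false-positive
    # rate of at most 4**-5 (< 0.1%).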
| 6 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and lazy deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the item at the bucket; return False if occupied by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
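

# A minimal usage sketch of the open-addressing HashMap above. Lookups walk the
# buckets by linear probing; deletions leave a _deleted tombstone so that later
# probes keep walking past the freed slot:
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8)
    hm["a"] = 1
    hm["b"] = 2
    del hm["a"]  # the slot becomes a tombstone, not None
    assert "a" not in hm and hm["b"] == 2
    print(hm)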
| 316 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] is required for a bracketed root
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
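
    # The bracket [a, b] halves every iteration, so reaching the 0.01 stopping
    # width from an initial width w takes about log2(w / 0.01) iterations.
    print(bisection(2, 4))  # root of 10 - x*x in [2, 4], approximately 3.16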
| 193 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193 | 1 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
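
    # Note: the expansion above is best-first search on f = g + h; with the
    # Manhattan-distance heuristic built here (plus the obstacle penalty of 99)
    # it behaves like A* on a 4-connected grid with unit step cost.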
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 5 | 1 |
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 371 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
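
    # Illustrative run of the Nagel-Schreckenberg update rule defined above:
    highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=0)
    evolution = simulate(highway, number_of_update=5, probability=0.3, max_speed=5)
    print(evolution[-1])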
| 98 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 217 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
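

# Illustrative usage (a sketch):
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.num_attention_heads == config.encoder_attention_heads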
| 217 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
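# Added example (not in the original file): the card above measures BLEU with sacrebleu
# on detokenized text. A commented sketch of the same metric from Python; the hypothesis
# and reference strings are made-up placeholders.
#
# import sacrebleu
#
# hypotheses = ["Maschinelles Lernen ist großartig, nicht wahr?"]
# references = [["Maschinelles Lernen ist großartig, oder?"]]  # one reference stream
# bleu = sacrebleu.corpus_bleu(hypotheses, references)
# print(round(bleu.score, 2))  # corpus-level BLEU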
| 303 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
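# Added example (not in the original file): a commented single-process sketch of the
# padding checked above, emulated with plain torch. pad_across_processes pads dim 0 up
# to the largest size found across ranks; that max size is hard-coded here.
#
# import torch.nn.functional as F
#
# local = torch.randint(0, 10, (3, 10))   # e.g. rank 1 holds shape (1 + 2, 10)
# max_rows = 5                            # e.g. num_processes + 1 with 4 processes
# padded = F.pad(local, (0, 0, 0, max_rows - local.shape[0]))        # zeros at the end
# padded_first = F.pad(local, (0, 0, max_rows - local.shape[0], 0))  # pad_first=True
# assert padded.shape == padded_first.shape == (5, 10)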
| 303 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__snake_case : Any = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
__snake_case : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
__snake_case : Any = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
__snake_case : Optional[int] = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
__snake_case : List[Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCamelCase , atol=1e-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
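# Added example (not in the original file): a commented toy illustration of the fused
# qkv split performed in read_in_q_k_v above; the hidden size is made up.
#
# hidden = 4
# qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
# q = qkv_weight[:hidden, :]               # rows [0, hidden)
# k = qkv_weight[hidden : 2 * hidden, :]   # rows [hidden, 2 * hidden)
# v = qkv_weight[-hidden:, :]              # rows [2 * hidden, 3 * hidden)
# assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)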
| 123 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
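# Added example (not in the original file): the lazy-module trick above can also be
# written with PEP 562 module-level __getattr__. A commented sketch for a hypothetical
# package with one heavy submodule:
#
# # mypkg/__init__.py
# import importlib
#
# _import_structure = {"heavy_module": ["HeavyClass"]}
#
# def __getattr__(name):
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")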
| 69 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
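# Added example (not in the original file): the same GitHub query without shelling out
# to curl, sketched with the `requests` library. Endpoint and header names follow the
# GitHub REST API; error handling is deliberately minimal.
import requests


def fetch_runners(token):
    response = requests.get(
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
        headers={"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"},
    )
    response.raise_for_status()
    return response.json()["runners"]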
| 354 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
'''
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
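# Added example (not in the original file): a guess at the core of find_backend as a
# standalone regex sketch. It scans an `if not is_xxx_available():` guard and joins
# multiple backends with "_and_". This is not the library's actual implementation.
import re


def find_backend_sketch(line):
    matches = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(matches) if matches else None


assert find_backend_sketch("    if not is_torch_available():") == "torch"
assert find_backend_sketch(
    "    if not (is_torch_available() and is_transformers_available()):"
) == "torch_and_transformers"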
| 116 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
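# Added note (not in the original file): worked arithmetic for the tester's sequence
# length above: num_patches = (image_size // patch_size) ** 2, plus one [CLS] token.
# With the tester defaults (30, 2): (30 // 2) ** 2 + 1 = 226; with a standard
# ViT-style setup (224, 16): (224 // 16) ** 2 + 1 = 197.
assert (30 // 2) ** 2 + 1 == 226
assert (224 // 16) ** 2 + 1 == 197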
| 160 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
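# Added example (not in the original file): how the deprecation warning above surfaces to
# callers. A commented sketch; `always` forces the warning even if already shown once.
#
# import warnings
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     extractor = DeformableDetrFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)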
| 160 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
                 num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
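# Added note (not in the original file): worked arithmetic for the //4 mask resolution
# checked above. For a padded 800 x 1088 input, the predicted mask logits come out at
# 800 // 4 = 200 by 1088 // 4 = 272.
assert (800 // 4, 1088 // 4) == (200, 272)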
| 357 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        expected_encoding = _a  # alias the fixture built in the "fmt: off" block above
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 324 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True,
                 crop_size: Dict[str, int] = None, do_rescale: bool = True,
                 rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def _A ( self : List[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **__lowerCamelCase : List[str] , ):
UpperCamelCase :Dict = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :List[str] = size if size is not None else self.size
UpperCamelCase :str = get_size_dict(__lowerCamelCase , param_name="""size""" , default_to_square=__lowerCamelCase )
UpperCamelCase :Optional[Any] = resample if resample is not None else self.resample
UpperCamelCase :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase :List[str] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase :str = get_size_dict(__lowerCamelCase , param_name="""crop_size""" , default_to_square=__lowerCamelCase )
UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase :int = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase :List[str] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase :Optional[Any] = image_std if image_std is not None else self.image_std
UpperCamelCase :List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase :Dict = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase :Optional[int] = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase :Dict = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase :Tuple = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_center_crop:
UpperCamelCase :str = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase :List[str] = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase :str = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
UpperCamelCase :Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase :str = {"""pixel_values""": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
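# Usage sketch (illustrative addition, not part of the original module; the class
# keeps this file's obfuscated name `_SCREAMING_SNAKE_CASE`):
#
#   import numpy as np
#   processor = _SCREAMING_SNAKE_CASE(size={"shortest_edge": 224})
#   image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
#   batch = processor.preprocess(images=image, return_tensors="np")
#   # batch["pixel_values"] has shape (1, 3, 224, 224): resize to the shortest
#   # edge, center crop, rescale to [0, 1], then normalize with the CLIP stats.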
| 38 |
"""simple docstring"""
def is_pangram(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """Check whether a sentence contains every letter of the alphabet, using a set."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """Check for a pangram with a fixed-size flag list, one slot per letter."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('''a''' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('''A''' )] = True
    return all(flag )
def is_pangram_fastest(input_str = "The quick brown fox jumps over the lazy dog" , ):
    """Check for a pangram with a single set comprehension over the letters."""
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark():
    """Benchmark the three pangram checks against each other."""
    from timeit import timeit
    setup_str = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
    print(timeit('''is_pangram()''' , setup=setup_str ) )
    print(timeit('''is_pangram_faster()''' , setup=setup_str ) )
    print(timeit('''is_pangram_fastest()''' , setup=setup_str ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
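# Sanity check (illustrative addition): all three implementations should agree.
#
#   assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
#   assert not is_pangram("Hello world")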
| 81 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
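# Usage sketch (illustrative addition; the checkpoint name and the `waveform`
# array are assumptions, not part of the original module):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
#   # `inputs` holds the feature extractor's input features plus the tokenized
#   # transcript under "labels".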
| 368 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
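# Quick demo (illustrative addition): the helper maps an activation-function
# name onto an ``nn.Module`` instance, so "swish" and "silu" resolve to the same layer.
if __name__ == "__main__":
    print(get_activation("silu"))  # SiLU()
    print(get_activation("mish"))  # Mish()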
| 129 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
| 74 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ):
    '''simple docstring'''
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = '''huggingface/label-files'''
    if "o365" in model_name:
        num_labels = 3_66
        filename = '''object365-id2label.json'''
    else:
        num_labels = 91
        filename = '''coco-detection-id2label.json'''
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
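# Worked example (illustrative addition): Swin stores query/key/value as a single
# fused projection of shape (3 * dim, dim); the three slices above recover them.
#
#   import torch
#   dim = 4
#   in_proj_weight = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
#   q, k, v = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
#   assert torch.equal(torch.cat([q, k, v]), in_proj_weight)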
def read_in_decoder_q_k_v( state_dict , config ):
    '''simple docstring'''
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
    else:
        raise ValueError(f'Model name {model_name} not supported' )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # print the original state dict for inspection
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer.decoder" , "model.decoder" )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer" , "model" )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='''coco_detection''' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print('''Logits:''' , outputs.logits[0, :3, :3] )
    print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
        expected_boxes = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
        expected_boxes = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1e-4 )
    print('''Everything ok!''' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print('''Pushing model and processor to hub...''' )
        model.push_to_hub(f'jozhang97/{model_name}' )
        processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 226 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def should_ignore( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
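# Worked examples (illustrative addition) for the three wildcard styles above:
#
#   should_ignore("encoder.model.0.conv.conv", ["encoder.model.*"])             # prefix wildcard -> True
#   should_ignore("quantizer.vq.layers.3._codebook.embed", ["vq.*._codebook"])  # infix wildcard -> True
#   should_ignore("decoder.layers.1.lstm", ["lstm"])                            # plain substring -> True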
def recursively_load_weights( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"""{name} was ignored""" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F"""Unknown model name: {model_name}""" )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
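# Example invocation (illustrative addition; the script filename and local paths
# are placeholders):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted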
| 193 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
SCREAMING_SNAKE_CASE_ = """\
Text data.
Second line of data."""
SCREAMING_SNAKE_CASE_ = """file"""
@pytest.fixture(scope="""session""" )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , """w""" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE = input_paths[compression_format]
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
SCREAMING_SNAKE_CASE = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
SCREAMING_SNAKE_CASE = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """custom_cache"""
SCREAMING_SNAKE_CASE = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE = xz_file
SCREAMING_SNAKE_CASE = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
SCREAMING_SNAKE_CASE = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / """__missing_file__.txt""" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = """./__missing_file__.txt"""
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    '''simple docstring'''
    output_file = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def __lowercase ( ) -> Dict:
'''simple docstring'''
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("""https://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("""ftp://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _SCREAMING_SNAKE_CASE )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("""s3://huggingface.co""" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("""s3://huggingface.co""" )
| 193 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet , scheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
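# Usage sketch (illustrative addition; the checkpoint id is an assumption):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")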
| 203 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 0 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 359 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a :Optional[Any] = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[str]:
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : List[Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        is_8bit = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
    if is_4bit or is_8bit:
SCREAMING_SNAKE_CASE__ : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Tuple = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to("""cpu""" )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : str = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(__lowerCAmelCase , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCAmelCase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.__dict__
            if is_8bit:
SCREAMING_SNAKE_CASE__ : str = bnb.nn.IntaParams(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
            elif is_4bit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb.nn.Paramsabit(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(__lowerCAmelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : str = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : List[str] = value.to(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(__lowerCAmelCase , device=__lowerCAmelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ : List[str] = new_value
else:
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(__lowerCAmelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : Dict = new_value
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False ) -> List[Any]:
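    # Recursively walk the module tree and swap eligible nn.Linear / Conv1D layers for
    # bitsandbytes quantized linear layers, skipping anything listed in `modules_to_not_convert`.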
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
current_key_name.append(__lowerCAmelCase )
if (isinstance(__lowerCAmelCase , nn.Linear ) or isinstance(__lowerCAmelCase , __lowerCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(__lowerCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : str = module.in_features
SCREAMING_SNAKE_CASE__ : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.LinearabitLt(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ : Tuple = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = bnb.nn.Linearabit(
__lowerCAmelCase , __lowerCAmelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ : int = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : Dict = type(__lowerCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__lowerCAmelCase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_been_replaced=__lowerCAmelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> str:
SCREAMING_SNAKE_CASE__ : int = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = _replace_with_bnb_linear(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , __lowerCAmelCase , )
return replace_with_bnb_linear(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , __lowerCAmelCase , )
return set_module_quantized_tensor_to_device(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Tuple:
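    # Heuristic: return the module names (the output head plus any weights tied to it) that
    # should stay in full precision, since quantizing tied embeddings/heads breaks weight tying.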
SCREAMING_SNAKE_CASE__ : List[Any] = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : List[str] = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = sum(__lowerCAmelCase , [] )
SCREAMING_SNAKE_CASE__ : str = len(__lowerCAmelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : int = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : str = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : Any = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Any = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace(__lowerCAmelCase , """""" )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
| 56 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
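    # Map the original checkpoint's parameter names onto the HF SegFormer naming scheme.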
lowerCAmelCase__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
lowerCAmelCase__ : Union[str, Any] = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
lowerCAmelCase__ : Dict = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase__ : Dict = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase__ : List[str] = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(snake_case__ )-1}""" )
if "norm" in key:
lowerCAmelCase__ : str = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase__ : Union[str, Any] = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
lowerCAmelCase__ : Tuple = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(snake_case__ )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase__ : List[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase__ : Optional[int] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase__ : Optional[Any] = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase__ : Dict = key.replace(f"""block{idx}""" , f"""block.{int(snake_case__ )-1}""" )
if "attn.q" in key:
lowerCAmelCase__ : Dict = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase__ : Dict = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase__ : List[Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase__ : Dict = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase__ : Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase__ : Optional[int] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase__ : Any = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase__ : List[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase__ : Any = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase__ : Dict = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(snake_case__ )-1}""" )
if key.startswith("""head""" ):
lowerCAmelCase__ : Tuple = key.replace("""head""" , """classifier""" )
lowerCAmelCase__ : Optional[int] = value
return new_state_dict
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
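    # The original checkpoint stores the key/value projection as one fused matrix per
    # attention block; split it into the separate key and value weights HF expects.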
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase__ : int = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase__ : Any = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase__ : Any = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase__ : Union[str, Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase__ : List[Any] = kv_bias[
config.hidden_sizes[i] :
]
def lowercase_ ( ) -> Tuple:
lowerCAmelCase__ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return image
@torch.no_grad()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
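    # End-to-end conversion: infer the config from the checkpoint name, remap the state
    # dict keys, load the weights into a HF model, sanity-check the logits, then save.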
lowerCAmelCase__ : str = SegformerConfig()
lowerCAmelCase__ : Optional[int] = False
# set attributes based on model_name
lowerCAmelCase__ : int = """huggingface/label-files"""
if "segformer" in model_name:
lowerCAmelCase__ : List[str] = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
lowerCAmelCase__ : List[str] = 150
lowerCAmelCase__ : int = """ade20k-id2label.json"""
lowerCAmelCase__ : str = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase__ : str = 19
lowerCAmelCase__ : Dict = """cityscapes-id2label.json"""
lowerCAmelCase__ : int = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Union[str, Any] = model_name[4:6]
lowerCAmelCase__ : Dict = 1000
lowerCAmelCase__ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Union[str, Any] = (1, 1000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
lowerCAmelCase__ : Tuple = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Optional[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : int = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase__ : Tuple = [64, 128, 320, 512]
lowerCAmelCase__ : Optional[Any] = 256
elif size == "b2":
lowerCAmelCase__ : Dict = [64, 128, 320, 512]
lowerCAmelCase__ : Any = 768
lowerCAmelCase__ : List[Any] = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase__ : int = [64, 128, 320, 512]
lowerCAmelCase__ : Optional[Any] = 768
lowerCAmelCase__ : Optional[int] = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase__ : Tuple = [64, 128, 320, 512]
lowerCAmelCase__ : int = 768
lowerCAmelCase__ : str = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase__ : Optional[int] = [64, 128, 320, 512]
lowerCAmelCase__ : List[Any] = 768
lowerCAmelCase__ : int = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
lowerCAmelCase__ : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
# prepare image
lowerCAmelCase__ : Any = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="""pt""" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
lowerCAmelCase__ : Any = torch.load(snake_case__ , map_location=torch.device("""cpu""" ) )
else:
lowerCAmelCase__ : Optional[int] = torch.load(snake_case__ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
lowerCAmelCase__ : List[str] = rename_keys(snake_case__ , encoder_only=snake_case__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(snake_case__ , snake_case__ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Any = SegformerForImageClassification(snake_case__ )
else:
lowerCAmelCase__ : List[Any] = SegformerForSemanticSegmentation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# forward pass
lowerCAmelCase__ : List[Any] = model(snake_case__ )
lowerCAmelCase__ : Any = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase__ : str = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase__ : List[str] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase__ : Optional[int] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase__ : Tuple = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase__ : Any = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase__ : Optional[int] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase__ : List[str] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase__ : str = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase__ : Tuple = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase__ : Tuple = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase__ : int = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase__ : int = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
lowerCAmelCase__ : List[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_A = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 242 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
a = logging.get_logger(__name__)
a = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'dpt'
def __init__( self : int , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Any=12 , lowerCAmelCase : str=3072 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : str=0.02 , lowerCAmelCase : str=1e-12 , lowerCAmelCase : Optional[Any]=384 , lowerCAmelCase : str=16 , lowerCAmelCase : int=3 , lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=True , lowerCAmelCase : Tuple=[2, 5, 8, 11] , lowerCAmelCase : Tuple="project" , lowerCAmelCase : Optional[int]=[4, 2, 1, 0.5] , lowerCAmelCase : Any=[96, 192, 384, 768] , lowerCAmelCase : int=256 , lowerCAmelCase : List[Any]=-1 , lowerCAmelCase : Any=False , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=0.4 , lowerCAmelCase : Dict=255 , lowerCAmelCase : int=0.1 , lowerCAmelCase : List[Any]=[1, 1024, 24, 24] , lowerCAmelCase : str=[0, 1] , lowerCAmelCase : str=None , **lowerCAmelCase : Optional[Any] , ):
super().__init__(**lowerCAmelCase )
lowerCAmelCase = hidden_size
lowerCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
lowerCAmelCase = BitConfig(**lowerCAmelCase )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
lowerCAmelCase = BitConfig(**lowerCAmelCase )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCAmelCase = backbone_featmap_shape
lowerCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = []
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
lowerCAmelCase = readout_type
lowerCAmelCase = reassemble_factors
lowerCAmelCase = neck_hidden_sizes
lowerCAmelCase = fusion_hidden_size
lowerCAmelCase = head_in_index
lowerCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = semantic_loss_ignore_index
lowerCAmelCase = semantic_classifier_dropout
def __lowercase ( self : Any ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 155 | 0 |
def lowerCamelCase__ ( _lowercase = 1000000 ):
'''simple docstring'''
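    # Sieve the primes up to the limit, then accumulate Euler's totient via
    # phi(n) = n * prod_{p | n} (1 - 1/p); sum(phi(n), 2 <= n <= limit) counts the
    # reduced proper fractions with denominator <= limit (Project Euler 72).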
    primes = set(range(3 , _lowercase , 2 ) )
    primes.add(2 )
    for p in range(3 , _lowercase , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , _lowercase , p ) ) )
    phi = [float(n ) for n in range(_lowercase + 1 )]
    for p in primes:
        for n in range(p , _lowercase + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 235 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase_ : str = BlipImageProcessor()
UpperCAmelCase_ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase_ : Optional[Any] = BlipaProcessor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).tokenizer
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).image_processor
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : str = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : int = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> int:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''lower newer'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Any = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 235 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
a_ = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
a_ = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
a_ = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : int , snake_case : Dict , snake_case : Dict=None , snake_case : Optional[int]="uniform_average" , snake_case : Dict=True ) -> Union[str, Any]:
"""simple docstring"""
        mse = mean_squared_error(
            snake_case , snake_case , sample_weight=snake_case , multioutput=snake_case , squared=snake_case )
return {"mse": mse}
| 175 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ : str = 'A painting of a squirrel eating a burger'
UpperCamelCase_ : Any = jax.device_count()
UpperCamelCase_ : List[str] = num_samples * [prompt]
UpperCamelCase_ : List[Any] = sd_pipe.prepare_inputs(snake_case )
UpperCamelCase_ : Dict = replicate(snake_case )
UpperCamelCase_ : Optional[Any] = shard(snake_case )
UpperCamelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCamelCase_ : Tuple = jax.random.split(snake_case , jax.device_count() )
UpperCamelCase_ : Optional[Any] = sd_pipe(snake_case , snake_case , snake_case , num_inference_steps=2_5 , jit=snake_case )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
UpperCamelCase_ : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ : List[str] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Tuple = 'stabilityai/stable-diffusion-2'
UpperCamelCase_, UpperCamelCase_ : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case , subfolder='scheduler' )
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
snake_case , scheduler=snake_case , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase_ : Optional[int] = scheduler_params
UpperCamelCase_ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase_ : Union[str, Any] = jax.device_count()
UpperCamelCase_ : Union[str, Any] = num_samples * [prompt]
UpperCamelCase_ : Tuple = sd_pipe.prepare_inputs(snake_case )
UpperCamelCase_ : List[Any] = replicate(snake_case )
UpperCamelCase_ : Optional[Any] = shard(snake_case )
UpperCamelCase_ : Tuple = jax.random.PRNGKey(0 )
UpperCamelCase_ : str = jax.random.split(snake_case , jax.device_count() )
UpperCamelCase_ : str = sd_pipe(snake_case , snake_case , snake_case , num_inference_steps=2_5 , jit=snake_case )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
UpperCamelCase_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_ : int = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
UpperCamelCase_ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_ : Union[str, Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 175 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCamelCase = get_logger()
_lowerCamelCase = None
class _snake_case (TensorFormatter[Mapping, "jax.Array", Mapping]):
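    # Formats Arrow-backed rows, columns, and batches as JAX arrays placed on a chosen device.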
def __init__( self ,_snake_case=None ,_snake_case=None ,**_snake_case ):
super().__init__(features=_snake_case )
import jax
from jaxlib.xla_client import Device
if isinstance(_snake_case ,_snake_case ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(_snake_case )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
UpperCAmelCase_ : List[str] = device if isinstance(_snake_case ,_snake_case ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ : List[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
UpperCAmelCase_ : List[str] = str(jax.devices()[0] )
UpperCAmelCase_ : Optional[int] = jnp_array_kwargs
@staticmethod
def UpperCamelCase__ ( ):
import jax
return {str(_snake_case ): device for device in jax.devices()}
def UpperCamelCase__ ( self ,_snake_case ):
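        # Stack homogeneous jax arrays in a column into one batched array; mixed columns pass through.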
import jax
import jax.numpy as jnp
if isinstance(_snake_case ,_snake_case ) and column:
if all(
isinstance(_snake_case ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_snake_case ,axis=0 )
return column
def UpperCamelCase__ ( self ,_snake_case ):
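        # Convert a single value to a jnp array, choosing int/float dtypes that match the
        # current x64 setting unless overridden via `jnp_array_kwargs`.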
import jax
import jax.numpy as jnp
if isinstance(_snake_case ,(str, bytes, type(_snake_case )) ):
return value
elif isinstance(_snake_case ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
UpperCAmelCase_ : int = {}
if isinstance(_snake_case ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
UpperCAmelCase_ : Optional[Any] = {"dtype": jnp.intaa}
else:
UpperCAmelCase_ : Dict = {"dtype": jnp.intaa}
elif isinstance(_snake_case ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
UpperCAmelCase_ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_snake_case ,PIL.Image.Image ):
UpperCAmelCase_ : Any = np.asarray(_snake_case )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCAmelCase_ : Any = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_snake_case ,**{**default_dtype, **self.jnp_array_kwargs} )
def UpperCamelCase__ ( self ,_snake_case ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_snake_case ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_snake_case ,"__array__" ) and not isinstance(_snake_case ,jax.Array ):
UpperCAmelCase_ : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_snake_case ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] )
elif isinstance(_snake_case ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_snake_case ) for substruct in data_struct] )
return self._tensorize(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
return map_nested(self._recursive_tensorize ,_snake_case ,map_list=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.python_features_decoder.decode_row(_snake_case )
return self.recursive_tensorize(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[str] = self.numpy_arrow_extractor().extract_column(_snake_case )
UpperCAmelCase_ : str = self.python_features_decoder.decode_column(_snake_case ,pa_table.column_names[0] )
UpperCAmelCase_ : Optional[int] = self.recursive_tensorize(_snake_case )
UpperCAmelCase_ : int = self._consolidate(_snake_case )
return column
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = self.numpy_arrow_extractor().extract_batch(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.python_features_decoder.decode_batch(_snake_case )
UpperCAmelCase_ : List[Any] = self.recursive_tensorize(_snake_case )
for column_name in batch:
UpperCAmelCase_ : Tuple = self._consolidate(batch[column_name] )
return batch
| 67 |
'''simple docstring'''
from __future__ import annotations
_lowerCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
UpperCAmelCase_ : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the reference grid
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_SCREAMING_SNAKE_CASE ) )
] # the action grid
UpperCAmelCase_ : Tuple = init[0]
UpperCAmelCase_ : List[Any] = init[1]
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Optional[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
UpperCAmelCase_ : List[str] = [[f, g, x, y]]
UpperCAmelCase_ : Tuple = False # flag that is set when search is complete
UpperCAmelCase_ : Union[str, Any] = False # flag set if we can't find expand
while not found and not resign:
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCAmelCase_ : Dict = cell.pop()
UpperCAmelCase_ : Tuple = next_cell[2]
UpperCAmelCase_ : str = next_cell[3]
UpperCAmelCase_ : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCAmelCase_ : Optional[Any] = True
else:
for i in range(len(_SCREAMING_SNAKE_CASE ) ): # to try out different valid actions
UpperCAmelCase_ : Union[str, Any] = x + DIRECTIONS[i][0]
UpperCAmelCase_ : Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_SCREAMING_SNAKE_CASE ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCAmelCase_ : Any = g + cost
UpperCAmelCase_ : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : List[str] = i
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : List[Any] = goal[0]
UpperCAmelCase_ : str = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCAmelCase_ : Optional[int] = x - DIRECTIONS[action[x][y]][0]
UpperCAmelCase_ : Optional[int] = y - DIRECTIONS[action[x][y]][1]
UpperCAmelCase_ : Optional[Any] = xa
UpperCAmelCase_ : List[str] = ya
invpath.append([x, y] )
UpperCAmelCase_ : Tuple = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
path.append(invpath[len(_SCREAMING_SNAKE_CASE ) - 1 - i] )
return path, action
if __name__ == "__main__":
_lowerCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_lowerCamelCase = [0, 0]
# all coordinates are given in format [y,x]
_lowerCamelCase = [len(grid) - 1, len(grid[0]) - 1]
_lowerCamelCase = 1
# the cost map which pushes the path closer to the goal
_lowerCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_lowerCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_lowerCamelCase = 99
_lowerCamelCase , _lowerCamelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 67 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCAmelCase = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = "A painting of a squirrel eating a burger "
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = generator.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = "A painting of a squirrel eating a burger "
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 195 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Union[str, Any] = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[str] = "canine"
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=768 , snake_case__ : Tuple=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : int=1_6384 , snake_case__ : str=16 , snake_case__ : Tuple=0.02 , snake_case__ : Dict=1E-12 , snake_case__ : Any=0 , snake_case__ : Optional[int]=0xe_000 , snake_case__ : List[str]=0xe_001 , snake_case__ : List[str]=4 , snake_case__ : List[str]=4 , snake_case__ : List[Any]=8 , snake_case__ : List[str]=1_6384 , snake_case__ : Union[str, Any]=128 , **snake_case__ : Tuple , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCamelCase_ : Tuple =max_position_embeddings
lowerCamelCase_ : Optional[int] =hidden_size
lowerCamelCase_ : Tuple =num_hidden_layers
lowerCamelCase_ : Dict =num_attention_heads
lowerCamelCase_ : str =intermediate_size
lowerCamelCase_ : Dict =hidden_act
lowerCamelCase_ : List[Any] =hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase_ : Dict =initializer_range
lowerCamelCase_ : Tuple =type_vocab_size
lowerCamelCase_ : Optional[Any] =layer_norm_eps
# Character config:
lowerCamelCase_ : List[str] =downsampling_rate
lowerCamelCase_ : List[Any] =upsampling_kernel_size
lowerCamelCase_ : Any =num_hash_functions
lowerCamelCase_ : Optional[int] =num_hash_buckets
lowerCamelCase_ : Union[str, Any] =local_transformer_stride
| 144 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 173 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : int = 0
@slow
def __lowerCamelCase ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            lowercase : Optional[Any] = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(lowercase ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            lowercase : str = AutoTokenizer.from_pretrained(model_name )
            self.assertIsNotNone(lowercase )
            self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(lowercase ) , 0 )
def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def __lowerCamelCase ( self ):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False )
            self.assertIsInstance(tokenizer , BertTokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False )
            self.assertIsInstance(tokenizer , GPTaTokenizer )
@require_tokenizers
def __lowerCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' )
            self.assertIsInstance(tokenizer , BertTokenizerFast )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json''' ) )
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt''' ) )
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' )
            self.assertIsInstance(tokenizer , GPTaTokenizerFast )
def __lowerCamelCase ( self ):
        with pytest.raises(ValueError ):
            AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
            if isinstance(tokenizer , BertTokenizer ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , False )
            else:
                self.assertEqual(tokenizer.do_lower_case , False )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __lowerCamelCase ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
                lowercase : str = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __lowerCamelCase ( self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
            tokenizer_class_from_name(tokenizer_name )
@require_tokenizers
def __lowerCamelCase ( self ):
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=False ) , BertTokenizer )
        self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , BertTokenizerFast )
    @require_tokenizers
    def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=False )
        sample = '''Hello, world. How are you?'''
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
        tokenizer = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=False )
        tokens = tokenizer.tokenize(sample )
        self.assertEqual('''[UNK]''' , tokens[0] )
    @require_tokenizers
    def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
        self.assertEqual(type(tokenizer ) , PreTrainedTokenizerFast )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            tokenizera = AutoTokenizer.from_pretrained(tmp_dir )
        self.assertIsInstance(tokenizera , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )
    def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained('''ctrl''' )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer , CTRLTokenizer )
def __lowerCamelCase ( self ):
# Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config('''bert-base-cased''' )
        config.pop('''_commit_hash''' , None )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config , {'''do_lower_case''': False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
        self.assertDictEqual(config , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            config = get_tokenizer_config(tmp_dir )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __lowerCamelCase ( self ):
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , slow_tokenizer_class=BertTokenizer )
            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
            self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self ):
try:
            AutoConfig.register('''custom''' , CustomConfig )
            # Can register in two steps
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig , slow_tokenizer_class=CustomTokenizer , fast_tokenizer_class=CustomTokenizerFast )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoTokenizer.register(BertConfig , fast_tokenizer_class=BertTokenizerFast )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER )
                bert_tokenizer.save_pretrained(tmp_dir )
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir )
                self.assertIsInstance(new_tokenizer , CustomTokenizerFast )
                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , use_fast=False )
                self.assertIsInstance(new_tokenizer , CustomTokenizer )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir )
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir )
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir , trust_remote_code=True , use_fast=False )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __lowerCamelCase ( self ):
        class NewTokenizer(BertTokenizer ):
            special_attribute_present = False
        class NewTokenizerFast(BertTokenizerFast ):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoTokenizer.register(CustomConfig , slow_tokenizer_class=NewTokenizer )
            AutoTokenizer.register(CustomConfig , fast_tokenizer_class=NewTokenizerFast )
# If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertFalse(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=False , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
            self.assertTrue(tokenizer.special_attribute_present )
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=True , use_fast=False )
            self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
            self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=True , use_fast=False )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCamelCase ( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            tokenizer = AutoTokenizer.from_pretrained('''bert-base''' )
def __lowerCamelCase ( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER , revision='''aaaaaa''' )
def __lowerCamelCase ( self ):
# Make sure we have cached the tokenizer.
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
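# Added usage sketch (hedged): the behaviours tested above reduce to this save/
# reload round trip; it needs network access (or a warm cache) for the download.
if __name__ == "__main__":
    demo_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    with tempfile.TemporaryDirectory() as demo_dir:
        demo_tokenizer.save_pretrained(demo_dir)
        reloaded = AutoTokenizer.from_pretrained(demo_dir)
        assert reloaded.__class__ is demo_tokenizer.__class__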
| 173 | 1 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING


def _get_default_logging_level( ) -> int:
    """simple docstring"""
    env_level_str = os.getenv("""DATASETS_VERBOSITY""" , None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys()) }''')
return _default_log_level
def _get_library_name( ) -> str:
"""simple docstring"""
return __name__.split(""".""")[0]
def _get_library_root_logger( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name())
def _configure_library_root_logger( ) -> None:
    """simple docstring"""
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger( ) -> None:
    """simple docstring"""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(_lowercase : Optional[str] = None) -> logging.Logger:
    """simple docstring"""
    if _lowercase is None:
        _lowercase = _get_library_name()
    return logging.getLogger(_lowercase)
def get_verbosity( ) -> int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(_lowercase : int) -> None:
"""simple docstring"""
_get_library_root_logger().setLevel(_lowercase)
def set_verbosity_info( ) -> None:
    """simple docstring"""
    return set_verbosity(INFO)


def set_verbosity_warning( ) -> None:
    """simple docstring"""
    return set_verbosity(WARNING)


def set_verbosity_debug( ) -> None:
    """simple docstring"""
    return set_verbosity(DEBUG)


def set_verbosity_error( ) -> None:
    """simple docstring"""
    return set_verbosity(ERROR)


def disable_propagation( ) -> None:
    """simple docstring"""
    _get_library_root_logger().propagate = False


def enable_propagation( ) -> None:
    """simple docstring"""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """simple docstring"""
    def __init__(self , *args , **kwargs ) -> Any: # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
def __iter__( self ) -> List[str]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , __lowercase ) -> Dict:
"""simple docstring"""
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Optional[Any]:
"""simple docstring"""
return self
    def __exit__(self , type_ , value , traceback ) -> Any:
"""simple docstring"""
return
_tqdm_active = True


class _tqdm_cls:
"""simple docstring"""
    def __call__(self , *args , disable=False , **kwargs ) -> int:
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock(self , *args , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock(self ) -> str:
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ) -> bool:
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar( ) -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar( ) -> None:
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
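# Added usage sketch (hedged): how callers typically drive the knobs defined in
# this module; the wrapped ``tqdm`` object silently falls back to ``EmptyTqdm``
# while progress bars are disabled.
if __name__ == "__main__":
    set_verbosity_debug()
    get_logger(__name__).debug("verbosity lowered to DEBUG")
    disable_progress_bar()
    for _ in tqdm(range(3)):  # iterates normally, but no bar is drawn
        pass
    enable_progress_bar()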
| 170 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def create_token_type_ids_from_sequences(self , token_ids_a , token_ids_b = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
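# Added usage sketch (hedged): with a real BARThez checkpoint available (network
# or cache), the special-token helper above wraps a sequence in <s> ... </s>.
if __name__ == "__main__":
    demo = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    single = demo.build_inputs_with_special_tokens([5, 6])
    assert single[0] == demo.cls_token_id and single[-1] == demo.sep_token_id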
| 170 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ):
    '''simple docstring'''
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )


def insert_next(collection: list , index: int ):
    '''simple docstring'''
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
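# Added self-check (hedged): a fixed input exercising the recursive sort above.
if __name__ == "__main__":
    sample = [5, 3, 8, 1]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 3, 5, 8]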
| 240 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
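# Added illustration (hedged, hypothetical): any extra subcommand would follow
# the same ``register_subcommand``/``run`` contract that EnvironmentCommand
# satisfies above. This echo command is NOT part of diffusers.
class _EchoCommand:
    @staticmethod
    def register_subcommand(commands_parser):
        sub = commands_parser.add_parser("echo", help="print a message and exit")
        sub.add_argument("message")
        sub.set_defaults(func=lambda args: _EchoCommand(args.message))

    def __init__(self, message):
        self._message = message

    def run(self):
        print(self._message)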
| 240 | 1 |
def solution(n: int = 100 ):
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
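# Added worked check: for n = 10 the square of the sum is 55**2 = 3025 and the
# sum of the squares is 385, so the difference is 2640.
assert solution(10) == 2640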
| 130 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_A = logging.getLogger(__name__)
def accuracy(out , labels ):
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )


def load_rocstories_dataset(dataset_path ):
    with open(dataset_path , encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, :len(with_cont1 )] = with_cont1
            input_ids[i, 1, :len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, :len(with_cont1 )] = with_cont1
            lm_labels[i, 1, :len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=str , default='' )
    parser.add_argument('--eval_dataset' , type=str , default='' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--num_train_epochs' , type=int , default=3 )
    parser.add_argument('--train_batch_size' , type=int , default=8 )
    parser.add_argument('--eval_batch_size' , type=int , default=16 )
    parser.add_argument('--adam_epsilon' , default=1E-8 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=int , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=int , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=float , default=6.25E-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=str , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--lm_coef' , type=float , default=0.9 )
    parser.add_argument('--n_valid' , type=int , default=3_74 )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]

    logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont1[:max_length] ) , len(cont2[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
if args.do_train:
        tr_loss , nb_tr_steps , exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info(' %s = %s' , key , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
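# Added sanity helper (hedged, not part of the original script): argmax over the
# class axis picks [1, 0], matching the labels, so both rows count as correct.
def _demo_accuracy_check():
    demo_logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    assert accuracy(demo_logits, np.array([1, 0])) == 2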
| 62 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase_ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config , items ):
    '''simple docstring'''
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config ):
    '''simple docstring'''
    config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=True )
def set_test_cache_config(tmp_path_factory , monkeypatch ):
    '''simple docstring'''
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope="""session""" )
def disable_tqdm_output():
    '''simple docstring'''
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false(monkeypatch ):
    '''simple docstring'''
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch ):
    '''simple docstring'''
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True )
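# Added illustration (hedged, hypothetical, not from the original conftest):
# because ``set_test_cache_config`` is autouse, any collected test already sees
# the redirected cache locations, roughly as this helper asserts.
def _example_cache_assertion():
    import datasets.config

    assert "cache" in str(datasets.config.HF_DATASETS_CACHE)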
| 369 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "The name of the task to train on."} , )
A__ : Optional[List[str]] = dataclasses.field(
default=__snake_case , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
A__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
A__ : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
A__ : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
A__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
A__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
A__ : Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
A__ : Optional[int] = dataclasses.field(
default=__snake_case , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort("""probability""" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(["""label""", """probability"""] )
    dataset = dataset.rename_column("""prediction""" , """label""" )
    dataset = dataset.map(lambda example : {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , F'''train_pseudo.{args.data_file_extension}''' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain(model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
    '''simple docstring'''
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
    data_files = {}
    args.data_file_extension = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
    data_files["""train"""] = args.train_file
    data_files["""infer"""] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
        data_files["""eval"""] = args.eval_file
for key in data_files:
        extension = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
            args.data_file_extension = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
    data_dir_format = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    current_data_dir = data_dir_format(0 )
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
        os.makedirs(current_data_dir , exist_ok=True )
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
        current_output_dir = os.path.join(current_data_dir , """stage-1""" )
        arguments_dict = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key ):
                arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir , """best-checkpoint""" , MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , model_bin_file_path , iteration , )
        else:
            logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , iteration )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_data_dir , """best-checkpoint""" )
            current_output_dir = os.path.join(current_data_dir , """stage-2""" )
            # Update arguments_dict
            arguments_dict["""model_name_or_path"""] = model_path
            arguments_dict["""train_file"""] = data_files["""train"""]
            arguments_dict["""output_dir"""] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , """best-checkpoint""" , MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , model_bin_file_path , iteration , )
            else:
                logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , """best-checkpoint""" ) )
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir , """eval_results_best-checkpoint.json""" )
        test_results_file = os.path.join(current_output_dir , """test_results_best-checkpoint.json""" )
        assert os.path.exists(eval_results_file )
        with open(eval_results_file , """r""" ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , """infer_output_best-checkpoint.csv""" )
        assert os.path.exists(infer_output_file )
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
        infer_output = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True )
            shutil.copy(eval_results_file , os.path.join(output_dir , F'''eval_results_iter-{iteration}.json''' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file , os.path.join(output_dir , F'''test_results_iter-{iteration}.json''' ) )
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir )
        accelerator.wait_for_everyone()
        data_files["""train_pseudo"""] = os.path.join(next_data_dir , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("""Best iteration: %d""" , best_iteration )
        logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , best_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(output_dir , """eval_results_best-iteration.json""" ) , )
    else:
        # Assume that the last iteration is the best
        logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
        logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(output_dir , """eval_results_best-iteration.json""" ) , )
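# Added invocation sketch (hedged): the file paths and model id below are
# placeholders; ``selftrain`` matches the signature restored above, and extra
# keyword arguments are only applied if the training dataclass defines them.
if __name__ == "__main__":
    selftrain(
        model_name_or_path="bert-base-uncased",
        train_file="train.csv",
        infer_file="infer.csv",
        output_dir="./self-train-output",
        max_selftrain_iterations=3,
    )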
| 59 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None ) ->int:
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
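# Added worked example (hedged): a root holding 3 coins with two empty children
# needs exactly 2 moves — one coin pushed down to each leaf.
if __name__ == "__main__":
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2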
| 102 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        # is_decoder=False is a reconstruction; the original boolean was obfuscated.
        return ConvNextV2Config(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False,
            initializer_range=self.initializer_range, out_features=self.out_features,
            out_indices=self.out_indices, num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    # Flag names reconstructed from the standard transformers test conventions.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
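# Hedged usage note (the file path below is an assumption, not part of this snippet):
# these tests are collected and run with pytest, e.g.
#   RUN_SLOW=1 pytest tests/models/convnextv2/test_modeling_convnextv2.py -q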
| 102 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 62 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldmad_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _A ( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Tuple="cpu" , __lowerCamelCase : str=torch.floataa , __lowerCamelCase : Tuple=0 ):
UpperCamelCase :str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :str = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
UpperCamelCase :Tuple = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
UpperCamelCase :List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldmad_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _A ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Dict="cpu" , __lowerCamelCase : int=torch.floataa , __lowerCamelCase : Union[str, Any]=0 ):
UpperCamelCase :str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Any = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
UpperCamelCase :Optional[Any] = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
UpperCamelCase :List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldmad(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldmad_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 62 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : Optional[Any] = args.pruning_method
_lowercase : Tuple = args.threshold
_lowercase : int = args.model_name_or_path.rstrip('/' )
_lowercase : List[str] = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
_lowercase : Optional[int] = torch.load(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) )
_lowercase : Dict = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowercase : Union[str, Any] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_lowercase : Optional[Any] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
_lowercase : Union[str, Any] = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_lowercase : List[str] = MagnitudeBinarizer.apply(inputs=lowerCamelCase_ , threshold=lowerCamelCase_ )
_lowercase : str = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowercase : Dict = name[:-6]
_lowercase : Optional[Any] = model[F'''{prefix_}mask_scores''']
_lowercase : Union[str, Any] = TopKBinarizer.apply(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Union[str, Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowercase : str = name[:-6]
_lowercase : Dict = model[F'''{prefix_}mask_scores''']
_lowercase : List[Any] = ThresholdBinarizer.apply(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowercase : Optional[Any] = name[:-6]
_lowercase : Tuple = model[F'''{prefix_}mask_scores''']
_lowercase , _lowercase : Any = -0.1, 1.1
_lowercase : Dict = torch.sigmoid(lowerCamelCase_ )
_lowercase : List[Any] = s * (r - l) + l
_lowercase : Optional[Any] = s_bar.clamp(min=0.0 , max=1.0 )
_lowercase : Optional[Any] = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_lowercase : Tuple = os.path.join(
os.path.dirname(lowerCamelCase_ ) , F'''bertarized_{os.path.basename(lowerCamelCase_ )}''' )
if not os.path.isdir(lowerCamelCase_ ):
shutil.copytree(lowerCamelCase_ , lowerCamelCase_ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
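# Hedged CLI usage sketch (script filename and paths are illustrative, not from the source):
# python bertarize.py \
#     --pruning_method topK \
#     --threshold 0.10 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model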
| 21 |
def UpperCamelCase_( lowerCamelCase_ ) -> int:
if not numbers:
return 0
if not isinstance(lowerCamelCase_ , (list, tuple) ) or not all(
isinstance(lowerCamelCase_ , lowerCamelCase_ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
_lowercase : int = numbers[0]
for i in range(1 , len(lowerCamelCase_ ) ):
# update the maximum and minimum subarray products
_lowercase : Union[str, Any] = numbers[i]
if number < 0:
_lowercase , _lowercase : Any = min_till_now, max_till_now
_lowercase : Union[str, Any] = max(lowerCamelCase_ , max_till_now * number )
_lowercase : Union[str, Any] = min(lowerCamelCase_ , min_till_now * number )
# update the maximum product found till now
_lowercase : Optional[Any] = max(lowerCamelCase_ , lowerCamelCase_ )
return max_prod
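

if __name__ == "__main__":
    # Hedged sanity check (not in the original module): for [2, 3, -2, 4]
    # the best contiguous product is 2 * 3 = 6.
    assert max_product_subarray([2, 3, -2, 4]) == 6
    print(max_product_subarray([2, 3, -2, 4]))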
| 21 | 1 |
from math import isqrt
def lowerCamelCase__ ( snake_case_ : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case_ ) + 1 ) )
def lowerCamelCase__ ( snake_case_ : int = 10**6 ) -> int:
__snake_case = 0
__snake_case = 1
__snake_case = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case_ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
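    # Hedged sanity check (not in the original module): below 100 the candidates
    # 7, 19, 37, 61, 91 yield four primes (91 = 7 * 13 is composite).
    assert solution(100) == 4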
| 238 |
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def lowerCamelCase__ ( snake_case_ : str ) -> str:
return ".".join(get_sub_domain_name(snake_case_ ).split('''.''' )[-2:] )
def lowerCamelCase__ ( snake_case_ : str ) -> str:
return parse.urlparse(snake_case_ ).netloc
def lowerCamelCase__ ( snake_case_ : str = "https://github.com" ) -> list[str]:
__snake_case = get_domain_name(snake_case_ )
# Initialize the parser
__snake_case = Parser(snake_case_ )
try:
# Open URL
__snake_case = requests.get(snake_case_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case = requests.get(snake_case_ )
# Get the valid email.
__snake_case = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(snake_case_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(snake_case_ )
if __name__ == "__main__":
snake_case_ = emails_from_url('https://github.com')
print(F'{len(emails)} emails found:')
print('\n'.join(sorted(emails)))
| 238 | 1 |
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
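    # Hedged round-trip check (not in the original script): decrypting an
    # encrypted message with the same key should recover the original.
    assert decrypt_message(6, encrypt_message(6, "Harshil Darji")) == "Harshil Darji"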
main()
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 | 0 |
'''simple docstring'''
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1). The
# obfuscated source divided a molar mass by itself; the argument order below
# is a reconstruction based on that law.
def effusion_ratio(molar_mass_one: float, molar_mass_two: float):
    return (
        round(sqrt(molar_mass_two / molar_mass_one), 6)
        if validate(molar_mass_one, molar_mass_two)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_one: float, molar_mass_two: float):
    return (
        round(effusion_rate * sqrt(molar_mass_two / molar_mass_one), 6)
        if validate(effusion_rate, molar_mass_one, molar_mass_two)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_one: float, molar_mass_two: float):
    return (
        round(effusion_rate / sqrt(molar_mass_two / molar_mass_one), 6)
        if validate(effusion_rate, molar_mass_one, molar_mass_two)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_one: float, effusion_rate_two: float):
    return (
        round(molar_mass / pow(effusion_rate_one / effusion_rate_two, 2), 6)
        if validate(molar_mass, effusion_rate_one, effusion_rate_two)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_one: float, effusion_rate_two: float):
    return (
        round(pow(effusion_rate_one / effusion_rate_two, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_one, effusion_rate_two)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )
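

if __name__ == "__main__":
    # Hedged usage sketch (molar masses are illustrative): hydrogen (~2.016 g/mol)
    # effuses roughly four times faster than oxygen (~31.998 g/mol).
    print(effusion_ratio(2.016, 31.998))  # ~3.98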
| 358 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
'''simple docstring'''
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : int = 2_56
elif size == "b2":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : List[str] = 7_68
lowerCAmelCase_ : Any = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ : List[str] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Union[str, Any] = 7_68
lowerCAmelCase_ : Union[str, Any] = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Tuple = 7_68
lowerCAmelCase_ : Tuple = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : str = 7_68
lowerCAmelCase_ : Any = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
                [-1.1372e01, -1.2787e01, -1.3477e01],
                [-1.2536e01, -1.4194e01, -1.4409e01],
                [-1.3217e01, -1.4888e01, -1.5327e01],
            ],
            [
                [-1.4791e01, -1.7122e01, -1.8277e01],
                [-1.7163e01, -1.9192e01, -1.9533e01],
                [-1.7897e01, -1.9991e01, -2.0315e01],
            ],
            [
                [7.6723e-01, 4.1921e-01, -7.7878e-02],
                [4.7772e-01, 9.5557e-03, -2.8082e-01],
                [3.6032e-01, -2.4826e-01, -5.1168e-01],
            ],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
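# Hedged CLI usage sketch (script filename and checkpoint path are illustrative):
# python convert_segformer_original_to_pytorch.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer_b0.pth \
#     --pytorch_dump_folder_path ./segformer-b0-ade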
| 89 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
'''simple docstring'''
def __init__( self: List[Any] , snake_case: str = "▁" , snake_case: bool = True , snake_case: Union[str, AddedToken] = "<unk>" , snake_case: Union[str, AddedToken] = "</s>" , snake_case: Union[str, AddedToken] = "<pad>" , ) -> Any:
snake_case_ :Any = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
snake_case_ :Dict = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ :Tuple = token_dict["""token"""]
snake_case_ :Union[str, Any] = Tokenizer(Unigram() )
snake_case_ :Tuple = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
snake_case_ :str = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=snake_case , add_prefix_space=snake_case ),
pre_tokenizers.Digits(individual_digits=snake_case ),
pre_tokenizers.Punctuation(),
] )
snake_case_ :Dict = decoders.Metaspace(replacement=snake_case , add_prefix_space=snake_case )
snake_case_ :str = TemplateProcessing(
single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
snake_case_ :Tuple = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(snake_case , snake_case )
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(
        self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
def lowerCAmelCase_ ( self: List[Any] ) -> Tuple:
snake_case_ :Dict = json.loads(self._tokenizer.to_str() )
snake_case_ :Optional[int] = self.special_tokens["""unk"""]["""id"""]
snake_case_ :Optional[int] = Tokenizer.from_str(json.dumps(snake_case ) )
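

# Hedged usage sketch (the corpus and output paths are illustrative, not from the source):
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train("corpus.txt", vocab_size=8000)
# tokenizer.save("unigram-tokenizer.json")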
| 66 |
"""simple docstring"""
def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int:
__UpperCamelCase = range(1 , snake_case )
__UpperCamelCase = range(1 , snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
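    # Hedged sanity check (assumes the well-known Project Euler 63 result):
    assert solution() == 49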
| 316 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : str = "test" ):
lowerCAmelCase : int = self.get_test_dataloader(UpperCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase : int = self.compute_metrics
lowerCAmelCase : Dict = None
lowerCAmelCase : Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
lowerCAmelCase : Optional[Any] = time.time()
try:
lowerCAmelCase : Optional[Any] = eval_loop(
UpperCAmelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , metric_key_prefix=UpperCAmelCase_ , )
finally:
lowerCAmelCase : Optional[int] = compute_metrics
lowerCAmelCase : Dict = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
UpperCAmelCase_ , UpperCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase : Dict = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions , 'predict' )
lowerCAmelCase : Optional[Any] = self.compute_metrics(UpperCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
lowerCAmelCase : int = metrics.pop(UpperCAmelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase_ )
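
# Hedged usage sketch: the names below (QuestionAnsweringTrainer, post_processing_function,
# model, training_args, and the datasets) are assumptions for illustration, not defined in
# this snippet. The overrides above expect a post-processing hook that turns raw start/end
# logits into text answers before metrics are computed.
#
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate()
# test_output = trainer.predict(predict_dataset, predict_examples)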
| 323 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients given lowest degree first) at x, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: one multiply and one add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
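
    # Quick self-check sketch: Horner's rule and direct evaluation compute the same
    # polynomial, so they should agree on random inputs up to floating-point error.
    import random

    coeffs = [random.uniform(-10.0, 10.0) for _ in range(6)]
    point = random.uniform(-3.0, 3.0)
    assert abs(evaluate_poly(coeffs, point) - horner(coeffs, point)) < 1e-6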
| 323 | 1 |
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics, including
            # different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
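
# Hedged usage sketch (the sizes below are illustrative assumptions, not taken from this
# file): the layer consumes a [seq_len, batch, d_proj] hidden state plus integer targets
# of shape [seq_len, batch]; calling it returns log-probabilities and registers the
# cross-entropy loss via `add_loss`.
#
# adaptive_softmax = TFAdaptiveSoftmaxMask(
#     vocab_size=267735, d_embed=1024, d_proj=1024, cutoffs=[20000, 40000, 200000], div_val=4
# )
# hidden = tf.random.normal((32, 8, 1024))
# target = tf.random.uniform((32, 8), maxval=267735, dtype=tf.int32)
# log_probs = adaptive_softmax(hidden, target, return_mean=True)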
| 20 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
__lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i]
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__lowerCAmelCase = []
__lowerCAmelCase = -1
for i in range(_lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
__lowerCAmelCase = 0
__lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [0] * no_of_processes
for i in range(_lowerCAmelCase ):
__lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE_ = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 301 | 0 |
"""UniSpeech model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
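
# Quick sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above
# multiplies out to 5 * 2**6 = 320, i.e. one encoder frame per 320 raw audio samples.
#
# config = UniSpeechConfig()
# assert config.inputs_to_logits_ratio == 320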
| 371 |
"""LLaMA model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
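
# Validation sketch for `_rope_scaling_validation` above (illustrative only):
#
# LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes
# LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})   # raises ValueError (factor <= 1)
# LlamaConfig(rope_scaling={"kind": "linear"})                  # raises ValueError (wrong fields)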
| 0 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_a = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int, UpperCAmelCase__ : Any, UpperCAmelCase__ : str=7, UpperCAmelCase__ : int=3, UpperCAmelCase__ : List[Any]=1_8, UpperCAmelCase__ : Optional[Any]=3_0, UpperCAmelCase__ : Optional[int]=4_0_0, UpperCAmelCase__ : Optional[Any]=None, UpperCAmelCase__ : Union[str, Any]=True, UpperCAmelCase__ : Dict=True, UpperCAmelCase__ : List[str]=None, ):
__lowercase = size if size is not None else {"height": 2_0, "width": 2_0}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = size
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
__lowercase = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowercase ( self : Tuple ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowercase ( self : Any ):
__lowercase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
__lowercase = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ,)
@require_torch
@require_vision
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None
def _lowercase ( self : Union[str, Any] ):
__lowercase = PixaStructImageProcessingTester(self )
@property
def _lowercase ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : int ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__, "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__, "do_convert_rgb" ) )
def _lowercase ( self : int ):
__lowercase = self.image_processor_tester.prepare_dummy_image()
__lowercase = self.image_processing_class(**self.image_processor_dict )
__lowercase = 2_0_4_8
__lowercase = image_processor(UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_606 ), atol=1E-3, rtol=1E-3 ) )
def _lowercase ( self : Dict ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Tuple ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
__lowercase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCAmelCase__ ):
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
__lowercase = "Hello"
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__, header_text=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__, header_text=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Any ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, np.ndarray )
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def _lowercase ( self : Dict ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." ,)
@require_torch
@require_vision
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
__lowercase = PixaStructImageProcessingTester(self, num_channels=4 )
__lowercase = 3
@property
def _lowercase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : str ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__, "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__, "do_convert_rgb" ) )
def _lowercase ( self : List[Any] ):
# Initialize image_processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
__lowercase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowercase = image_processor(
image_inputs[0], return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
__lowercase = image_processor(
UpperCAmelCase__, return_tensors="pt", max_patches=UpperCAmelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
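
# Shape note for the assertions above: every flattened patch stores
# patch_height * patch_width * num_channels pixel values plus two extra slots for
# the patch's row and column index, so with the default 16x16 patches and 3 channels:
#
# expected_hidden_dim = 16 * 16 * 3 + 2  # == 770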
| 17 |
"""Combination Sum IV: count the ordered combinations of array elements that sum to a target."""


def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the combinations with naive recursion (exponential time)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    """Count the same combinations, memoizing on the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Count the same combinations with an iterative bottom-up table."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
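
    # Cross-check: all three implementations count the same ordered combinations;
    # for array [1, 2, 5] and target 5 the count is 9.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_with_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )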
| 298 | 0 |
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector of real components with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            summands = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(summands)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of n random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A w x h real matrix stored as a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix of random integers in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
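

if __name__ == "__main__":
    # Minimal demo sketch of the classes above.
    v = Vector([1.0, 2.0, 2.0])
    print(v.euclidean_length())                    # 3.0
    print(axpy(2.0, Vector([1.0, 0.0, 1.0]), v))   # 2*x + y = (3.0,2.0,4.0)

    m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
    print(m.determinant())                         # 1*4 - 2*3 = -2.0
    print(m * Vector([1.0, 1.0]))                  # (3.0,7.0)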
| 371 |
| 121 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCamelCase_ = "pt"
elif is_tf_available():
lowerCamelCase_ = "tf"
else:
lowerCamelCase_ = "jax"
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = ByTaTokenizer
SCREAMING_SNAKE_CASE_ : Dict = False
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE :Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> ByTaTokenizer:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=20 ,SCREAMING_SNAKE_CASE__=5 ) -> Tuple[str, list]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
try:
__SCREAMING_SNAKE_CASE :str = tokenizer.decode([i] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__SCREAMING_SNAKE_CASE :Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE__ : re.match(R'''^[ a-zA-Z]+$''' ,t[1] ) ,SCREAMING_SNAKE_CASE__ ) )
__SCREAMING_SNAKE_CASE :List[Any] = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
__SCREAMING_SNAKE_CASE :Union[str, Any] = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
while len(SCREAMING_SNAKE_CASE__ ) < min_length:
__SCREAMING_SNAKE_CASE :Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
__SCREAMING_SNAKE_CASE :str = [t[0] for t in toks]
# Ensure consistency
__SCREAMING_SNAKE_CASE :List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
__SCREAMING_SNAKE_CASE :Optional[int] = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
)
if with_prefix_space:
__SCREAMING_SNAKE_CASE :List[str] = ''' ''' + output_txt
__SCREAMING_SNAKE_CASE :int = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
return output_txt, output_ids
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
__SCREAMING_SNAKE_CASE :List[Any] = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] ,batch_without_eos_added['''input_ids'''] )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :Optional[int] = '''Unicode €.'''
__SCREAMING_SNAKE_CASE :List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] ,SCREAMING_SNAKE_CASE__ )
# decoding
__SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'''Unicode €.</s>''' )
__SCREAMING_SNAKE_CASE :Dict = tokenizer('''e è é ê ë''' )
__SCREAMING_SNAKE_CASE :str = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] ,SCREAMING_SNAKE_CASE__ )
# decoding
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) ,'''e è é ê ë</s>''' )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__SCREAMING_SNAKE_CASE :int = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
__SCREAMING_SNAKE_CASE :int = tokenizer(SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if FRAMEWORK != "jax":
__SCREAMING_SNAKE_CASE :Optional[int] = list(batch.input_ids.numpy()[0] )
else:
__SCREAMING_SNAKE_CASE :int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__SCREAMING_SNAKE_CASE :List[str] = tokenizer(SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' ,SCREAMING_SNAKE_CASE__ )
self.assertIn('''attention_mask''' ,SCREAMING_SNAKE_CASE__ )
self.assertNotIn('''decoder_input_ids''' ,SCREAMING_SNAKE_CASE__ )
self.assertNotIn('''decoder_attention_mask''' ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :int = [
'''Summary of the text.''',
'''Another summary.''',
]
__SCREAMING_SNAKE_CASE :Tuple = tokenizer(
text_target=SCREAMING_SNAKE_CASE__ ,max_length=32 ,padding='''max_length''' ,truncation=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = self.ta_base_tokenizer
__SCREAMING_SNAKE_CASE :Optional[int] = ['''A long paragraph for summarization. </s>''']
__SCREAMING_SNAKE_CASE :Any = ['''Summary of the text. </s>''']
# fmt: off
__SCREAMING_SNAKE_CASE :Tuple = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
__SCREAMING_SNAKE_CASE :int = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
__SCREAMING_SNAKE_CASE :str = tokenizer(SCREAMING_SNAKE_CASE__ ,text_target=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ ,batch['''input_ids'''][0] )
self.assertEqual(SCREAMING_SNAKE_CASE__ ,batch['''labels'''][0] )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
__SCREAMING_SNAKE_CASE :Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE :Tuple = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE :Tuple = ''' He is very happy, UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__SCREAMING_SNAKE_CASE :Tuple = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE :Optional[Any] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__SCREAMING_SNAKE_CASE :Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = after_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
self.assertIn('''new_additional_special_token''' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
__SCREAMING_SNAKE_CASE :str = tokenizer.__class__.from_pretrained(SCREAMING_SNAKE_CASE__ ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE :Any = json.load(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
__SCREAMING_SNAKE_CASE :Optional[Any] = json.load(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = [f'''<extra_id_{i}>''' for i in range(1_25 )]
__SCREAMING_SNAKE_CASE :Tuple = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__SCREAMING_SNAKE_CASE :int = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__SCREAMING_SNAKE_CASE :Dict = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ ,)
self.assertIn(
'''an_additional_special_token''' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__SCREAMING_SNAKE_CASE :Optional[Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' ,lstrip=SCREAMING_SNAKE_CASE__ )]
__SCREAMING_SNAKE_CASE :str = tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE__ ,additional_special_tokens=SCREAMING_SNAKE_CASE__ ,)
self.assertIn('''a_new_additional_special_token''' ,tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) ,)
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
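# a single byte token that is not valid UTF-8 on its own cannot be decoded,
# so decoding it is expected to yield the empty string (assumed rationale)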
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE :Any = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
__SCREAMING_SNAKE_CASE :int = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE :Optional[Any] = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__SCREAMING_SNAKE_CASE :Any = 0
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.convert_ids_to_tokens(
SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__ )
for attr in attributes_list:
setattr(SCREAMING_SNAKE_CASE__ ,attr + '''_id''' ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ ,attr + '''_id''' ) ,SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ ,attr + '''_id''' ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(getattr(SCREAMING_SNAKE_CASE__ ,attr + '''_id''' ) ,SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens_ids''' ) ,[] )
setattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens_ids''' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens''' ) ,[token_to_test_setters] )
self.assertListEqual(getattr(SCREAMING_SNAKE_CASE__ ,'''additional_special_tokens_ids''' ) ,[token_id_to_test_setters] )
| 191 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int , a_ : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :Optional[int] = [1]
for i in range(2 , a_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__SCREAMING_SNAKE_CASE :List[str] = []
__SCREAMING_SNAKE_CASE :Optional[Any] = list(range(a_ ) )
# Find permutation
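# Decode k in the factorial number system: divmod by the largest remaining
# factorial picks the next element; e.g. n=4, k=5 gives [0, 3, 2, 1]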
while factorials:
__SCREAMING_SNAKE_CASE :Optional[int] = factorials.pop()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = divmod(a_ , a_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 1 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> int:
"""simple docstring"""
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
SCREAMING_SNAKE_CASE__ : int = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Tuple = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
SCREAMING_SNAKE_CASE__ : int = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> List[str]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ : str = dataset
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Dict = con
SCREAMING_SNAKE_CASE__ : Dict = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : List[str] = num_proc
SCREAMING_SNAKE_CASE__ : Union[str, Any] = to_sql_kwargs
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_sql_kwargs.pop("""sql""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.to_sql_kwargs.pop("""con""" , _a )
SCREAMING_SNAKE_CASE__ : List[str] = self.to_sql_kwargs.pop("""index""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs )
return written
def _a ( self , _a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = args
SCREAMING_SNAKE_CASE__ : List[Any] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
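# only the first batch may create/replace the table; later batches must append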
SCREAMING_SNAKE_CASE__ : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Any = batch.to_pandas()
SCREAMING_SNAKE_CASE__ : Any = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def _a ( self , _a , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = len(self.dataset ), self.batch_size
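# fan batch offsets out to worker processes; each call returns its written row count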
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 56 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
while b:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = b, a % b
return a
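# e.g. euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6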
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def _lowercase ( ) -> Union[str, Any]:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 56 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : str=True , __lowerCamelCase : int=True , __lowerCamelCase : int=99 , __lowerCamelCase : int=16 , __lowerCamelCase : int=36 , __lowerCamelCase : Any=6 , __lowerCamelCase : int=6 , __lowerCamelCase : Dict=6 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Optional[Any]=None , ):
UpperCamelCase :List[Any] = parent
UpperCamelCase :Optional[int] = batch_size
UpperCamelCase :Optional[Any] = seq_length
UpperCamelCase :Optional[int] = is_training
UpperCamelCase :str = use_input_mask
UpperCamelCase :List[str] = use_token_type_ids
UpperCamelCase :List[str] = use_labels
UpperCamelCase :Union[str, Any] = vocab_size
UpperCamelCase :Optional[Any] = embedding_size
UpperCamelCase :str = hidden_size
UpperCamelCase :Union[str, Any] = num_hidden_layers
UpperCamelCase :Union[str, Any] = num_hidden_groups
UpperCamelCase :Dict = num_attention_heads
UpperCamelCase :int = intermediate_size
UpperCamelCase :List[Any] = hidden_act
UpperCamelCase :Any = hidden_dropout_prob
UpperCamelCase :str = attention_probs_dropout_prob
UpperCamelCase :Optional[int] = max_position_embeddings
UpperCamelCase :Any = type_vocab_size
UpperCamelCase :List[str] = type_sequence_label_size
UpperCamelCase :int = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :List[Any] = num_choices
UpperCamelCase :List[Any] = scope
def _A ( self : int ):
UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase :Dict = None
if self.use_input_mask:
UpperCamelCase :Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase :str = None
UpperCamelCase :Union[str, Any] = None
UpperCamelCase :List[str] = None
if self.use_labels:
UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase :str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self : int ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _A ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str ):
UpperCamelCase :Dict = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
UpperCamelCase :str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
UpperCamelCase :Dict = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _A ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
UpperCamelCase :Tuple = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Optional[Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _A ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : int ):
UpperCamelCase :List[Any] = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
UpperCamelCase :Optional[Any] = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[str] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int ):
UpperCamelCase :int = self.num_labels
UpperCamelCase :int = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
UpperCamelCase :Union[str, Any] = self.num_labels
UpperCamelCase :Any = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ):
UpperCamelCase :Dict = self.num_choices
UpperCamelCase :List[Any] = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCamelCase :Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase :int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A ( self : int ):
UpperCamelCase :Any = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[int] = config_and_inputs
UpperCamelCase :Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
snake_case__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : List[Any] = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Dict = True
def _A ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Dict=False ):
UpperCamelCase :Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
UpperCamelCase :Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
UpperCamelCase :Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _A ( self : Any ):
UpperCamelCase :Optional[int] = AlbertModelTester(self )
UpperCamelCase :Tuple = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _A ( self : Dict ):
self.config_tester.run_common_tests()
def _A ( self : Dict ):
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _A ( self : Union[str, Any] ):
UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def _A ( self : List[Any] ):
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _A ( self : Optional[int] ):
UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def _A ( self : str ):
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _A ( self : Any ):
UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase :Optional[Any] = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def _A ( self : Dict ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :List[Any] = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _A ( self : Union[str, Any] ):
UpperCamelCase :int = AlbertModel.from_pretrained("""albert-base-v2""" )
UpperCamelCase :str = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCamelCase :Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase :List[str] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
UpperCamelCase :Union[str, Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
| 38 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
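# _LazyModule defers the heavy torch/vision imports until an attribute is first accessed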
| 221 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class a ( __UpperCamelCase ):
def __init__( self , __magic_name__ ) -> Optional[int]:
super().__init__()
_a = nn.ModuleList(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(__magic_name__ , __magic_name__ , self.nets ) ):
_a = controlnet(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , )
# merge samples
if i == 0:
_a = down_samples, mid_sample
else:
_a = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__magic_name__ , __magic_name__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , ) -> List[str]:
_a = 0
_a = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__magic_name__ , is_main_process=__magic_name__ , save_function=__magic_name__ , safe_serialization=__magic_name__ , variant=__magic_name__ , )
idx += 1
_a = model_path_to_save + f'_{idx}'
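# each additional controlnet is saved to a sibling directory suffixed _1, _2, ...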
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
_a = 0
_a = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_a = pretrained_model_path
while os.path.isdir(__magic_name__ ):
_a = ControlNetModel.from_pretrained(__magic_name__ , **__magic_name__ )
controlnets.append(__magic_name__ )
idx += 1
_a = pretrained_model_path + f'_{idx}'
logger.info(f'{len(__magic_name__ )} controlnets loaded from {pretrained_model_path}.' )
if len(__magic_name__ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(__magic_name__ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(__magic_name__ )
| 351 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : str = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str ):
'''simple docstring'''
__UpperCAmelCase : str = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(f'Building PyTorch model from configuration: {config}' )
__UpperCAmelCase : List[Any] = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
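# Example invocation (hypothetical script name and paths):
#   python convert_lxmert_checkpoint.py --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin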
| 226 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
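# the three points are collinear iff AB x AC is the zero vector (after rounding)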
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 335 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = '''ibert'''
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , _UpperCamelCase="absolute" , _UpperCamelCase=False , _UpperCamelCase="none" , **_UpperCamelCase , ) -> List[Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : List[Any] = type_vocab_size
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Tuple = position_embedding_type
UpperCAmelCase_ : str = quant_mode
UpperCAmelCase_ : Tuple = force_dequant
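# quant_mode toggles I-BERT's integer-only inference; force_dequant lets chosen
# nonlinear ops fall back to full precision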
class lowerCamelCase (_snake_case ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 145 |
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( __snake_case : int = 1_000_000 , __snake_case : int = 10 ):
'''simple docstring'''
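# A hollow square lamina with outer side o and square hole side h uses o*o - h*h
# tiles (o and h of equal parity); count tile totals t <= t_limit that can be
# formed in 1..10 distinct ways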
UpperCAmelCase_ : defaultdict = defaultdict(__snake_case )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase_ : Union[str, Any] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase_ : int = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__snake_case , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'{solution() = }')
| 145 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
for char in word:
__lowerCamelCase = ord(UpperCamelCase__ )
if not _is_chinese_char(UpperCamelCase__ ):
return 0
return 1
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
__lowerCamelCase = set()
for token in tokens:
__lowerCamelCase = len(UpperCamelCase__ ) > 1 and is_chinese(UpperCamelCase__ )
if chinese_word:
word_set.add(UpperCamelCase__ )
__lowerCamelCase = list(UpperCamelCase__ )
return word_list
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : set() ) -> str:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__lowerCamelCase = max([len(UpperCamelCase__ ) for w in chinese_word_set] )
__lowerCamelCase = bert_tokens
__lowerCamelCase , __lowerCamelCase = 0, len(UpperCamelCase__ )
while start < end:
__lowerCamelCase = True
if is_chinese(bert_word[start] ):
__lowerCamelCase = min(end - start , UpperCamelCase__ )
for i in range(UpperCamelCase__ , 1 , -1 ):
__lowerCamelCase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowerCamelCase = '##' + bert_word[j]
__lowerCamelCase = start + i
__lowerCamelCase = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : LTP , UpperCamelCase__ : BertTokenizer ) -> int:
"""simple docstring"""
__lowerCamelCase = []
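# segment the corpus with LTP in chunks of 100 lines, keeping multi-char Chinese words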
for i in range(0 , len(UpperCamelCase__ ) , 100 ):
__lowerCamelCase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws
__lowerCamelCase = [get_chinese_word(UpperCamelCase__ ) for r in res]
ltp_res.extend(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = []
for i in range(0 , len(UpperCamelCase__ ) , 100 ):
__lowerCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
__lowerCamelCase = []
for input_ids, chinese_word in zip(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCamelCase = []
for id in input_ids:
__lowerCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase__ )
input_tokens.append(UpperCamelCase__ )
__lowerCamelCase = add_sub_symbol(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCamelCase__ ):
if token[:2] == "##":
__lowerCamelCase = token[2:]
# save chinese tokens' pos
if len(UpperCamelCase__ ) == 1 and _is_chinese_char(ord(UpperCamelCase__ ) ):
ref_id.append(UpperCamelCase__ )
ref_ids.append(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
return ref_ids
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
__lowerCamelCase = f.readlines()
__lowerCamelCase = [line.strip() for line in data if len(UpperCamelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__lowerCamelCase = LTP(args.ltp ) # faster in GPU device
__lowerCamelCase = BertTokenizer.from_pretrained(args.bert )
__lowerCamelCase = prepare_ref(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
__lowerCamelCase = [json.dumps(UpperCamelCase__ ) + '\n' for ref in ref_ids]
f.writelines(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__A = parser.parse_args()
main(args)
| 90 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=16 , lowerCamelCase__=[32, 64, 128] , lowerCamelCase__=[1, 2, 1] , lowerCamelCase__=[2, 2, 4] , lowerCamelCase__=2 , lowerCamelCase__=2.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=10 , lowerCamelCase__=8 , lowerCamelCase__=["stage1", "stage2"] , lowerCamelCase__=[1, 2] , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = patch_norm
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = is_training
__lowerCamelCase = scope
__lowerCamelCase = use_labels
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = encoder_stride
__lowerCamelCase = out_features
__lowerCamelCase = out_indices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__lowerCamelCase = None
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> str:
'''simple docstring'''
return
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
__lowerCamelCase = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
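# flatten (B, C, H, W) back to (B, H*W, C) for comparison with hidden_states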
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ = FocalNetConfig
snake_case_ = False
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
| 90 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self : List[str] , *_A : List[str] , _A : List[str]=None , _A : int=None , **_A : str ) -> Any:
"""simple docstring"""
super().__init__(*_A , **_A )
snake_case_ : Optional[Any] = eval_examples
snake_case_ : Tuple = post_process_function
def UpperCAmelCase_ ( self : Optional[int] , _A : Tuple=None , _A : Optional[int]=None , _A : Tuple=None , _A : str = "eval" ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case_ : Optional[Any] = self.get_eval_dataloader(_A )
snake_case_ : str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : List[str] = self.compute_metrics
snake_case_ : List[str] = None
snake_case_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
snake_case_ : Optional[Any] = time.time()
try:
snake_case_ : Any = eval_loop(
_A , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
snake_case_ : int = compute_metrics
snake_case_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
snake_case_ : Dict = self.post_process_function(_A , _A , output.predictions )
snake_case_ : Optional[int] = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
snake_case_ : Any = metrics.pop(_A )
metrics.update(output.metrics )
else:
snake_case_ : List[Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case_ : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def UpperCAmelCase_ ( self : Optional[int] , _A : Tuple , _A : Union[str, Any] , _A : List[Any]=None , _A : str = "test" ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case_ : Tuple = self.compute_metrics
snake_case_ : List[str] = None
snake_case_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
snake_case_ : Optional[int] = time.time()
try:
snake_case_ : Union[str, Any] = eval_loop(
_A , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
snake_case_ : Optional[Any] = compute_metrics
snake_case_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case_ : Dict = self.post_process_function(_A , _A , output.predictions , 'predict' )
snake_case_ : Any = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
snake_case_ : Dict = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
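
# Usage sketch (every right-hand-side name below is an assumption, not part of this
# file): the trainer defers `compute_metrics` until predictions are post-processed
# back into answer text, then folds `speed_metrics` timing into the result.
#
#   trainer = SCREAMING_SNAKE_CASE_(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=raw_validation_examples,   # untokenized examples
#       post_process_function=postprocess_qa_predictions,
#       compute_metrics=lambda p: squad_metric.compute(
#           predictions=p.predictions, references=p.label_ids),
#   )
#   metrics = trainer.evaluate(metric_key_prefix="eval")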
| 88 |
def SCREAMING_SNAKE_CASE__ ( __a , __a = False ):
if not isinstance(__a , __a ):
snake_case_ : str = f"""Expected string as input, found {type(__a )}"""
raise ValueError(__a )
if not isinstance(__a , __a ):
snake_case_ : int = f"""Expected boolean as use_pascal parameter, found {type(__a )}"""
raise ValueError(__a )
snake_case_ : Union[str, Any] = input_str.split('_' )
snake_case_ : int = 0 if use_pascal else 1
snake_case_ : List[Any] = words[start_index:]
snake_case_ : str = [word[0].upper() + word[1:] for word in words_to_capitalize]
snake_case_ : Optional[Any] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 88 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """open-llama"""
def __init__( self , A_=10_0000 , A_=4096 , A_=1_1008 , A_=32 , A_=32 , A_="silu" , A_=2048 , A_=0.02 , A_=1e-6 , A_=True , A_=0 , A_=1 , A_=2 , A_=False , A_=True , A_=0.1 , A_=0.1 , A_=True , A_=True , A_=None , **A_ , ) ->str:
'''simple docstring'''
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : List[Any] = rms_norm_eps
__lowerCAmelCase : Optional[int] = use_cache
__lowerCAmelCase : List[str] = kwargs.pop(
'''use_memorry_efficient_attention''' , A_ )
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_dropout_prob
__lowerCAmelCase : Optional[Any] = use_stable_embedding
__lowerCAmelCase : str = shared_input_output_embedding
__lowerCAmelCase : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ , )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__lowerCAmelCase : Optional[int] = self.rope_scaling.get('''type''' , A_ )
__lowerCAmelCase : Optional[int] = self.rope_scaling.get('''factor''' , A_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 275 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_UpperCamelCase = random.Random()
def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
if rng is None:
__lowerCAmelCase : Any = global_rng
__lowerCAmelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __lowercase (unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : str = min_seq_length
__lowerCAmelCase : int = max_seq_length
__lowerCAmelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : Any = padding_value
__lowerCAmelCase : str = sampling_rate
__lowerCAmelCase : Optional[Any] = return_attention_mask
__lowerCAmelCase : Optional[Any] = do_normalize
__lowerCAmelCase : Optional[Any] = feature_size
__lowerCAmelCase : Optional[int] = chunk_length
__lowerCAmelCase : Optional[Any] = hop_length
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , A_=False , A_=False ) ->Optional[Any]:
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCAmelCase : Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : Optional[Any] = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = WhisperFeatureExtractionTester(self )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : List[str] = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__lowerCAmelCase : int = self.feature_extraction_class.from_pretrained(A_ )
__lowerCAmelCase : Dict = feat_extract_first.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters
__lowerCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
__lowerCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
__lowerCAmelCase : List[str] = feat_extract_first.to_dict()
__lowerCAmelCase : Tuple = feat_extract_second.to_dict()
__lowerCAmelCase : Any = feat_extract_first.mel_filters
__lowerCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__lowerCAmelCase : Tuple = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowerCAmelCase : Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__lowerCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCAmelCase : Optional[int] = np.asarray(A_ )
__lowerCAmelCase : Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
__lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__lowerCAmelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]
__lowerCAmelCase : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__lowerCAmelCase : Any = feature_extractor(A_ , return_tensors='''np''' ).input_features
__lowerCAmelCase : List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
import torch
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
__lowerCAmelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Tuple = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowerCAmelCase : int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : Any = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__lowerCAmelCase : Union[str, Any] = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
        # fmt: off
        __lowerCAmelCase : Optional[int] = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
__lowerCAmelCase : int = self._load_datasamples(1 )
__lowerCAmelCase : Any = WhisperFeatureExtractor()
__lowerCAmelCase : Optional[Any] = feature_extractor(A_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = self._load_datasamples(1 )[0]
__lowerCAmelCase : Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
__lowerCAmelCase : Union[str, Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
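

# Standalone sketch of the property the last test asserts (the 1e-7 epsilon is an
# assumption matching the usual zero-mean/unit-variance helper): whatever the input
# scale, the normalized signal ends up with mean ~0 and variance ~1.
def _zero_mean_unit_var(x):
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)


if __name__ == "__main__":
    _signal = np.random.rand(16_000).astype(np.float32) * 65_535.0
    _normed = _zero_mean_unit_var(_signal)
    assert abs(float(_normed.mean())) < 1e-3 and abs(float(_normed.var()) - 1.0) < 1e-3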
| 275 | 1 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_SCREAMING_SNAKE_CASE : List[Any] = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _lowerCAmelCase ( UpperCAmelCase : List[Any] , UpperCAmelCase : Any ):
'''simple docstring'''
warnings.warn(UpperCAmelCase , UpperCAmelCase )
requires_backends(UpperCAmelCase , '''sklearn''' )
return (preds == labels).mean()
def _lowerCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
warnings.warn(UpperCAmelCase , UpperCAmelCase )
requires_backends(UpperCAmelCase , '''sklearn''' )
UpperCamelCase__ : List[Any] =simple_accuracy(UpperCAmelCase , UpperCAmelCase )
UpperCamelCase__ : Union[str, Any] =fa_score(y_true=UpperCAmelCase , y_pred=UpperCAmelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _lowerCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : str ):
'''simple docstring'''
warnings.warn(UpperCAmelCase , UpperCAmelCase )
requires_backends(UpperCAmelCase , '''sklearn''' )
UpperCamelCase__ : List[str] =pearsonr(UpperCAmelCase , UpperCAmelCase )[0]
UpperCamelCase__ : str =spearmanr(UpperCAmelCase , UpperCAmelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _lowerCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ):
'''simple docstring'''
warnings.warn(UpperCAmelCase , UpperCAmelCase )
requires_backends(UpperCAmelCase , '''sklearn''' )
assert len(UpperCAmelCase ) == len(UpperCAmelCase ), F'''Predictions and labels have mismatched lengths {len(UpperCAmelCase )} and {len(UpperCAmelCase )}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "mrpc":
return acc_and_fa(UpperCAmelCase , UpperCAmelCase )
elif task_name == "sts-b":
return pearson_and_spearman(UpperCAmelCase , UpperCAmelCase )
elif task_name == "qqp":
return acc_and_fa(UpperCAmelCase , UpperCAmelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
else:
raise KeyError(UpperCAmelCase )
def _lowerCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ):
'''simple docstring'''
warnings.warn(UpperCAmelCase , UpperCAmelCase )
requires_backends(UpperCAmelCase , '''sklearn''' )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(UpperCAmelCase )} and {len(UpperCAmelCase )}''' )
if task_name == "xnli":
return {"acc": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
else:
raise KeyError(UpperCAmelCase )
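

# Quick sanity sketch (synthetic labels): the accuracy + F1 combination used above
# for MRPC/QQP-style tasks, computed directly with sklearn.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics import f1_score

    preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
    acc = float((preds == labels).mean())
    fa = float(f1_score(y_true=labels, y_pred=preds))
    print({'acc': acc, 'f1': fa, 'acc_and_f1': (acc + fa) / 2})  # acc=0.75, f1=0.8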
| 361 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_ = 'ChineseCLIPImageProcessor'
SCREAMING_SNAKE_CASE_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Tuple , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
UpperCamelCase__ : List[str] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
UpperCamelCase__ : List[str] =kwargs.pop('''feature_extractor''' )
UpperCamelCase__ : List[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.image_processor
def __call__( self : Optional[int] , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCamelCase__ : Optional[int] =self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
UpperCamelCase__ : str =self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
UpperCamelCase__ : Optional[int] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def _lowerCAmelCase ( self : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowerCAmelCase ( self : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[str] =self.tokenizer.model_input_names
UpperCamelCase__ : List[str] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self : Any ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
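
# Usage sketch (the checkpoint id is an assumption — any Chinese-CLIP checkpoint
# shipping both a tokenizer and an image processor should work):
#   processor = __a.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16')
#   inputs = processor(text=['一张猫的照片'], images=pil_image, return_tensors='pt')
#   # -> BatchEncoding with input_ids/attention_mask from the tokenizer and the
#   #    image processor's pixel_values merged in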
| 157 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__snake_case =0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__snake_case =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class UpperCAmelCase_ :
def __init__( self : List[str] ) -> str:
lowerCAmelCase = WATERMARK_BITS
lowerCAmelCase = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : torch.FloatTensor ) -> str:
        # can't encode images that are narrower than 256 pixels (only the width is checked)
if images.shape[-1] < 2_5_6:
return images
lowerCAmelCase = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase = [self.encoder.encode(UpperCAmelCase__ , 'dwtDct' ) for image in images]
lowerCAmelCase = torch.from_numpy(np.array(UpperCAmelCase__ ) ).permute(0 , 3 , 1 , 2 )
lowerCAmelCase = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
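

# Usage sketch (synthetic batch; `apply_watermark` is the upstream method name,
# which is obfuscated above): images are expected in [-1, 1], NCHW, width >= 256.
#   encoder = UpperCAmelCase_()
#   watermarked = encoder.apply_watermark(torch.zeros(1, 3, 2_5_6, 2_5_6))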
| 4 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
UpperCAmelCase = {"""bert_for_seq_generation""": 512}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = []
snake_case__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , __UpperCamelCase : int , __UpperCamelCase : Optional[int]="<s>" , __UpperCamelCase : Optional[Any]="</s>" , __UpperCamelCase : Optional[Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : int="<::::>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Any , ) -> None:
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCamelCase = vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
return self.sp_model.get_piece_size()
def _UpperCamelCase ( self : int ) -> Optional[int]:
_UpperCamelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ) -> Union[str, Any]:
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : str , __UpperCamelCase : Any ) -> Tuple:
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> List[str]:
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Any ) -> Optional[int]:
return self.sp_model.piece_to_id(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[int] ) -> Optional[Any]:
_UpperCamelCase = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def _UpperCamelCase ( self : str , __UpperCamelCase : Dict ) -> Optional[Any]:
_UpperCamelCase = []
_UpperCamelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
_UpperCamelCase = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 256 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
    # Round first: n ** (1 / 3) is inexact in floating point (e.g. 27 ** (1 / 3)
    # evaluates to 3.0000000000000004), so comparing the raw cube would fail.
    _UpperCAmelCase = round(n ** (1 / 3))
    return (_UpperCAmelCase * _UpperCAmelCase * _UpperCAmelCase) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
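

# Float-free alternative sketch (the helper name is made up): binary-search the
# integer cube root so no rounding is needed at all.
def _perfect_cube_int(n: int) -> bool:
    lo, hi = 0, max(1, n)
    while lo < hi:
        mid = (lo + hi) // 2
        if mid ** 3 < n:
            lo = mid + 1
        else:
            hi = mid
    return lo ** 3 == n


assert _perfect_cube_int(2_7) and not _perfect_cube_int(4)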
| 30 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a :
def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ):
# Input as list
_UpperCAmelCase = list(poly_a or [0] )[:]
_UpperCAmelCase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_UpperCAmelCase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_UpperCAmelCase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
_UpperCAmelCase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
_UpperCAmelCase = self.__multiply()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ):
_UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(__lowerCAmelCase ) <= 1:
return dft[0]
#
_UpperCAmelCase = self.c_max_length // 2
while next_ncol > 0:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root**next_ncol
# First half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_UpperCAmelCase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__lowerCAmelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_UpperCAmelCase = new_dft
_UpperCAmelCase = next_ncol // 2
return dft[0]
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.__dft("""A""" )
_UpperCAmelCase = self.__dft("""B""" )
_UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_UpperCAmelCase = 2
while next_ncol <= self.c_max_length:
_UpperCAmelCase = [[] for i in range(__lowerCAmelCase )]
_UpperCAmelCase = self.root ** (next_ncol // 2)
_UpperCAmelCase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_UpperCAmelCase = new_inverse_c
next_ncol *= 2
# Unpack
_UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
_UpperCAmelCase = """A = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_UpperCAmelCase = """B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_UpperCAmelCase = """A*B = """ + """ + """.join(
f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
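
# Demo sketch (uses the upstream constructor/attribute names, which are obfuscated
# above): multiplying (1 + 2x + 3x^2) by (4 + 5x + 6x^2) should print
#   A*B = 4*x^0 + 13*x^1 + 28*x^2 + 27*x^3 + 18*x^4
# up to rounding in the inverse FFT.
#   print(a(poly_a=[1, 2, 3], poly_b=[4, 5, 6]))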
| 30 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 58 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 0 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCamelCase_ ( _a ):
"""simple docstring"""
return EnvironmentCommand()
def lowerCamelCase_ ( _a ):
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class _a ( A__):
@staticmethod
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : List[Any] )-> str:
lowerCAmelCase__ : int = parser.add_parser('''env''' )
download_parser.set_defaults(func=lowerCamelCase__ )
download_parser.add_argument(
'''--accelerate-config_file''' , default=lowerCamelCase__ , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , *_SCREAMING_SNAKE_CASE : Any )-> Optional[int]:
lowerCAmelCase__ : List[str] = accelerate_config_file
def UpperCAmelCase__( self : Dict )-> Dict:
lowerCAmelCase__ : Optional[int] = '''not installed'''
if is_safetensors_available():
import safetensors
lowerCAmelCase__ : str = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
lowerCAmelCase__ : Any = F'{safetensors.__version__} but is ignored because of PyTorch version too old.'
lowerCAmelCase__ : Dict = '''not installed'''
lowerCAmelCase__ : List[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase__ : Tuple = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCamelCase__ ):
lowerCAmelCase__ : Any = load_config_from_file(self._accelerate_config_file ).to_dict()
lowerCAmelCase__ : Any = (
'''\n'''.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else F'\t{accelerate_config}'
)
lowerCAmelCase__ : Any = '''not installed'''
lowerCAmelCase__ : Optional[int] = '''NA'''
if is_torch_available():
import torch
lowerCAmelCase__ : Optional[Any] = torch.__version__
lowerCAmelCase__ : Any = torch.cuda.is_available()
lowerCAmelCase__ : Optional[Any] = '''not installed'''
lowerCAmelCase__ : List[str] = '''NA'''
if is_tf_available():
import tensorflow as tf
lowerCAmelCase__ : Any = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase__ : Optional[Any] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase__ : str = bool(tf.config.list_physical_devices('''GPU''' ) )
lowerCAmelCase__ : Optional[Any] = '''not installed'''
lowerCAmelCase__ : Any = '''not installed'''
lowerCAmelCase__ : str = '''not installed'''
lowerCAmelCase__ : List[Any] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase__ : Union[str, Any] = flax.__version__
lowerCAmelCase__ : int = jax.__version__
lowerCAmelCase__ : Union[str, Any] = jaxlib.__version__
lowerCAmelCase__ : int = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase__ : Optional[int] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'{safetensors_version}',
'''Accelerate version''': F'{accelerate_version}',
'''Accelerate config''': F'{accelerate_config_str}',
'''PyTorch version (GPU?)''': F'{pt_version} ({pt_cuda_available})',
'''Tensorflow version (GPU?)''': F'{tf_version} ({tf_cuda_available})',
'''Flax version (CPU?/GPU?/TPU?)''': F'{flax_version} ({jax_backend})',
'''Jax version''': F'{jax_version}',
'''JaxLib version''': F'{jaxlib_version}',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(lowerCamelCase__ ) )
return info
@staticmethod
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : List[Any] )-> Any:
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''BeitFeatureExtractor''']
lowerCamelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
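
# Usage sketch: `_LazyModule` defers the heavy submodule imports, so e.g.
# `from transformers.models.beit import BeitConfig` stays cheap until the name is
# actually touched; torch-/flax-only names are simply omitted when the backend is
# unavailable, per the try/except blocks above.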
| 211 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( A : Optional[int] ) -> List[str]:
UpperCAmelCase_ : List[str] = torch.load(A , map_location='''cpu''' )
if "model" in sd.keys():
UpperCAmelCase_ : Union[str, Any] = torch.load(A , map_location='''cpu''' )['''model''']
# pop unnecessary weights
UpperCAmelCase_ : Tuple = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(A )
UpperCAmelCase_ : str = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ : Union[str, Any] = sd.pop(A )
UpperCAmelCase_ : Dict = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ : Any = sd[key]
        # We split QKV into separate Q, K, V tensors
UpperCAmelCase_ : Optional[int] = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
UpperCAmelCase_ : List[str] = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
UpperCAmelCase_ : Optional[int] = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
UpperCAmelCase_ : Tuple = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.split(A , depth // 3 , dim=0 )
UpperCAmelCase_ : Dict = q
UpperCAmelCase_ : Optional[int] = k
UpperCAmelCase_ : Optional[Any] = v
del sd[key]
return sd
@torch.no_grad()
def __UpperCAmelCase ( A : Dict , A : Union[str, Any] , A : Any=None ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = load_checkpoint(A )
if config is not None:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained(A )
else:
UpperCAmelCase_ : Dict = OPTConfig()
UpperCAmelCase_ : Union[str, Any] = OPTModel(A ).half().eval()
model.load_state_dict(A )
# Check results
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
if __name__ == "__main__":
_UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
_UpperCamelCase : List[str] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
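
# CLI sketch (script filename and paths are assumptions):
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m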
| 304 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
UpperCAmelCase_ : List[Any] = result + left + right
return input_list
def __UpperCAmelCase ( A : list ) -> list:
if len(A ) <= 1:
return input_list
UpperCAmelCase_ : List[str] = list(A )
# iteration for two-way merging
UpperCAmelCase_ : Tuple = 2
while p <= len(A ):
        # get the low, high and middle indices for merge sort of a single run
for i in range(0 , len(A ) , A ):
UpperCAmelCase_ : Union[str, Any] = i
UpperCAmelCase_ : int = i + p - 1
UpperCAmelCase_ : Any = (low + high + 1) // 2
UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A )
# final merge of last two parts
if p * 2 >= len(A ):
UpperCAmelCase_ : str = i
UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
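
# Worked example sketch (upstream names, which are obfuscated above): the pass
# width p doubles each iteration (2, 4, 8, ...), so [5, 3, 1, 4, 2] is merged in
# runs of 2, then 4, with the final merge folding in the leftover tail:
#   iter_merge_sort([5, 3, 1, 4, 2])  ->  [1, 2, 3, 4, 5]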
| 304 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 165 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = XLMRobertaTokenizer
__lowerCAmelCase = XLMRobertaTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = """<pad>"""
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCamelCase_ ) , 1002 )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
UpperCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ )
UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@cached_property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase_ , f.name )
UpperCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
UpperCamelCase = pickle.dumps(lowerCamelCase_ )
pickle.loads(lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = """I was born in 92000, and this is falsé."""
UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = """Hello World!"""
UpperCamelCase = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
UpperCamelCase = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 165 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__ : int = position
UpperCAmelCase__ : List[Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase__ : List[str] = []
for position in positions:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(UpperCamelCase__ )
return permissible_positions
def _UpperCamelCase ( UpperCamelCase__ ):
return not any(elem == 0 for row in board for elem in row )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if is_complete(UpperCamelCase__ ):
return True
for position in get_valid_pos(UpperCamelCase__ , len(UpperCamelCase__ ) ):
UpperCAmelCase__ , UpperCAmelCase__ : str = position
if board[y][x] == 0:
UpperCAmelCase__ : Optional[int] = curr + 1
if open_knight_tour_helper(UpperCamelCase__ , UpperCamelCase__ , curr + 1 ):
return True
UpperCAmelCase__ : List[Any] = 0
return False
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Dict = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
UpperCAmelCase__ : Optional[int] = 1
if open_knight_tour_helper(UpperCamelCase__ , (i, j) , 1 ):
return board
UpperCAmelCase__ : Union[str, Any] = 0
    UpperCAmelCase__ : Dict = f'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(UpperCamelCase__ )
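# A self-contained, de-obfuscated sketch of the same backtracking search,
# added as a reading aid; every name below is illustrative and does not
# refer to an identifier in the obfuscated block above.
def knight_tour_board(n):
    moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    board = [[0] * n for _ in range(n)]
    def solve(y, x, step):
        if step == n * n:  # every square carries a move number 1..n*n
            return True
        for dy, dx in moves:
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n and board[ny][nx] == 0:
                board[ny][nx] = step + 1
                if solve(ny, nx, step + 1):
                    return True
                board[ny][nx] = 0  # undo the move and backtrack
        return False
    for i in range(n):
        for j in range(n):
            board[i][j] = 1  # try every starting square
            if solve(i, j, 1):
                return board
            board[i][j] = 0
    raise ValueError(f"Open Knight Tour cannot be performed on a board of size {n}")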
if __name__ == "__main__":
import doctest
doctest.testmod()
| 163 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _snake_case ( a__ ):
lowerCAmelCase :Optional[int] = '''pegasus'''
lowerCAmelCase :Optional[int] = ['''past_key_values''']
lowerCAmelCase :str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=1024 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=12 , _lowerCamelCase=4096 , _lowerCamelCase=16 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="gelu" , _lowerCamelCase=1024 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=0 , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=1 , **_lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : List[Any] = d_model
UpperCAmelCase__ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase__ : Any = encoder_layers
UpperCAmelCase__ : List[str] = encoder_attention_heads
UpperCAmelCase__ : int = decoder_ffn_dim
UpperCAmelCase__ : Any = decoder_layers
UpperCAmelCase__ : Tuple = decoder_attention_heads
UpperCAmelCase__ : Optional[int] = dropout
UpperCAmelCase__ : Dict = attention_dropout
UpperCAmelCase__ : Optional[int] = activation_dropout
UpperCAmelCase__ : Dict = activation_function
UpperCAmelCase__ : Optional[Any] = init_std
UpperCAmelCase__ : int = encoder_layerdrop
UpperCAmelCase__ : Tuple = decoder_layerdrop
UpperCAmelCase__ : str = use_cache
UpperCAmelCase__ : Any = encoder_layers
UpperCAmelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
@property
def snake_case__ ( self):
return self.encoder_attention_heads
@property
def snake_case__ ( self):
return self.d_model
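# A minimal demonstration of the attribute_map indirection declared above,
# assuming the class is exported as the standard `transformers.PegasusConfig`:
# reading `hidden_size` / `num_attention_heads` transparently resolves to
# `d_model` / `encoder_attention_heads`.
if __name__ == "__main__":
    from transformers import PegasusConfig
    cfg = PegasusConfig(d_model=512, encoder_attention_heads=8)
    assert cfg.hidden_size == 512
    assert cfg.num_attention_heads == 8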
| 163 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase : Any = 25_00_04
UpperCAmelCase : Union[str, Any] = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = MBartTokenizer
UpperCamelCase : List[str] = MBartTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Any = True
def __A ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = MBartTokenizer(A , keep_accents=A )
lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase = self.rust_tokenizer_class.from_pretrained(A , **A )
lowerCamelCase = self.tokenizer_class.from_pretrained(A , **A )
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase = tokenizer_r.from_pretrained(A )
lowerCamelCase = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = "facebook/mbart-large-en-ro"
UpperCamelCase : Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
UpperCamelCase : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
UpperCamelCase : Optional[int] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def __A ( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
lowerCamelCase = 1
return cls
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def __A ( self ) -> List[str]:
'''simple docstring'''
self.assertIn(A , self.tokenizer.all_special_ids )
lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
lowerCamelCase = self.tokenizer.decode(A , skip_special_tokens=A )
lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
lowerCamelCase = 10
lowerCamelCase = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def __A ( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = tempfile.mkdtemp()
lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
lowerCamelCase = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
lowerCamelCase = targets["""input_ids"""]
lowerCamelCase = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
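# A condensed usage sketch of the language-code handling exercised above,
# using the public checkpoint named in the test class; requires network
# access to fetch the tokenizer files.
if __name__ == "__main__":
    from transformers import MBartTokenizer
    tok = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # source sequences end with </s> followed by the source language code
    assert batch["input_ids"][0, -1].item() == tok.convert_tokens_to_ids("en_XX")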
| 66 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Dict = StableDiffusionPanoramaPipeline
UpperCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
UpperCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __A ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCamelCase = DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCamelCase = CLIPTextModel(A )
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , A , A=0 ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = """french fries"""
lowerCamelCase = sd_pipe(**A , negative_prompt=A )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A , view_batch_size=2 )
lowerCamelCase = output.images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=A )
lowerCamelCase = StableDiffusionPanoramaPipeline(**A )
lowerCamelCase = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = sd_pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , A=0 ) -> Dict:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(A )
lowerCamelCase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=A )
lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A ).images
lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = 0
def callback_fn(A , A , A ) -> None:
lowerCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCamelCase = latents[0, -3:, -3:, -1]
lowerCamelCase = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCamelCase = latents[0, -3:, -3:, -1]
lowerCamelCase = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase = False
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = self.get_inputs()
pipe(**A , callback=A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __A ( self ) -> str:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase = """stabilityai/stable-diffusion-2-base"""
lowerCamelCase = DDIMScheduler.from_pretrained(A , subfolder="""scheduler""" )
lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(A , scheduler=A , safety_checker=A )
lowerCamelCase = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase = self.get_inputs()
lowerCamelCase = pipe(**A )
lowerCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
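# A minimal end-to-end usage sketch mirroring the slow tests above (same
# model id and DDIM scheduler); assumes a CUDA device and network access.
if __name__ == "__main__":
    model_id = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(
        model_id, scheduler=scheduler, safety_checker=None
    ).to("cuda")
    image = pipe("a photo of the dolomites", generator=torch.manual_seed(0)).images[0]
    image.save("panorama.png")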
| 66 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
        # For consistency across the different places where the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
a :Tuple = [[1, 2, 4], [1, 2, 3, 4]]
a :str = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def SCREAMING_SNAKE_CASE__ ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
a :Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = [[1, 2, 3], [1, 2, 4]]
a :Dict = DisjunctiveConstraint(_lowerCamelCase )
a , a , a :Union[str, Any] = dc.update(1 )
a :Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Optional[Any] = dc.update(2 )
a :Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :Union[str, Any] = dc.update(3 )
a :Optional[int] = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
a :List[str] = DisjunctiveConstraint(_lowerCamelCase )
a , a , a :Tuple = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :Dict = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
a , a , a :Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
a , a , a :List[str] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
a , a , a :Dict = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
a , a , a :List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
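# A usage sketch of how the constraint is consumed in practice: constrained
# beam search via `generate`. The t5-small checkpoint and the word pair are
# illustrative; note the class rejects pairs where one tokenization is a
# complete prefix of the other.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    tok = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    variants = tok(["Frankfurt", "Heidelberg"], add_special_tokens=False).input_ids
    out = model.generate(
        **tok("translate English to German: I am travelling by train", return_tensors="pt"),
        constraints=[DisjunctiveConstraint(variants)],  # output must contain one variant
        num_beams=4,
    )
    print(tok.decode(out[0], skip_special_tokens=True))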
| 94 |
import math
class _snake_case :
    def __init__( self , _lowerCamelCase=0 ): # a graph with nodes 0, 1, ..., n-1
a :Optional[int] = n
a :Union[str, Any] = [
[math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase )
] # adjacency matrix for weight
a :List[Any] = [
[math.inf for j in range(0 , _lowerCamelCase )] for i in range(0 , _lowerCamelCase )
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
a :Tuple = w
def SCREAMING_SNAKE_CASE__ ( self ):
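        # classic Floyd-Warshall relaxation, O(n^3): after the k-th outer
        # iteration, dp[i][j] holds the shortest i -> j distance using only
        # vertices 0..k as intermediates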
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
a :Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
return self.dp[u][v]
if __name__ == "__main__":
snake_case : str = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 94 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE : Union[str, Any] = """ResNetConfig"""
# Base docstring
_SCREAMING_SNAKE_CASE : str = """microsoft/resnet-50"""
_SCREAMING_SNAKE_CASE : List[Any] = [1, 2_0_4_8, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE : Tuple = """microsoft/resnet-50"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """tiger cat"""
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : str = "relu" ):
super().__init__()
UpperCamelCase__ : Optional[Any] =nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , bias=lowercase_ )
UpperCamelCase__ : Tuple =nn.BatchNormad(lowercase_ )
UpperCamelCase__ : int =ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCAmelCase ( self : Dict , lowercase_ : Tensor ):
UpperCamelCase__ : List[Any] =self.convolution(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.normalization(lowercase_ )
UpperCamelCase__ : Optional[int] =self.activation(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowercase_ : ResNetConfig ):
super().__init__()
UpperCamelCase__ : Any =ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
UpperCamelCase__ : Tuple =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
UpperCamelCase__ : Any =config.num_channels
def _lowerCAmelCase ( self : str , lowercase_ : Tensor ):
UpperCamelCase__ : Optional[Any] =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
UpperCamelCase__ : Dict =self.embedder(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.pooler(lowercase_ )
return embedding
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
UpperCamelCase__ : int =nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
UpperCamelCase__ : Optional[int] =nn.BatchNormad(lowercase_ )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Tensor ):
UpperCamelCase__ : Dict =self.convolution(lowercase_ )
UpperCamelCase__ : Dict =self.normalization(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" ):
super().__init__()
UpperCamelCase__ : Optional[Any] =in_channels != out_channels or stride != 1
UpperCamelCase__ : str =(
ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase__ : List[str] =nn.Sequential(
ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , activation=lowercase_ ) , )
UpperCamelCase__ : Any =ACTaFN[activation]
def _lowerCAmelCase ( self : str , lowercase_ : Tuple ):
UpperCamelCase__ : Any =hidden_state
UpperCamelCase__ : Union[str, Any] =self.layer(lowercase_ )
UpperCamelCase__ : str =self.shortcut(lowercase_ )
hidden_state += residual
UpperCamelCase__ : str =self.activation(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 , lowercase_ : str = "relu" , lowercase_ : int = 4 ):
super().__init__()
UpperCamelCase__ : Optional[Any] =in_channels != out_channels or stride != 1
UpperCamelCase__ : Union[str, Any] =out_channels // reduction
UpperCamelCase__ : str =(
ResNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase__ : int =nn.Sequential(
ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 ) , ResNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ ) , ResNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
UpperCamelCase__ : List[Any] =ACTaFN[activation]
def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
UpperCamelCase__ : Dict =hidden_state
UpperCamelCase__ : str =self.layer(lowercase_ )
UpperCamelCase__ : Tuple =self.shortcut(lowercase_ )
hidden_state += residual
UpperCamelCase__ : Optional[int] =self.activation(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowercase_ : ResNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
UpperCamelCase__ : Dict =ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
UpperCamelCase__ : Union[str, Any] =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowercase_ , lowercase_ , stride=lowercase_ , activation=config.hidden_act ) , *[layer(lowercase_ , lowercase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Tensor ):
UpperCamelCase__ : Optional[Any] =input
for layer in self.layers:
UpperCamelCase__ : Tuple =layer(lowercase_ )
return hidden_state
class __a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowercase_ : ResNetConfig ):
super().__init__()
UpperCamelCase__ : Optional[Any] =nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCamelCase__ : int =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(ResNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def _lowerCAmelCase ( self : Dict , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
UpperCamelCase__ : int =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase__ : Union[str, Any] =hidden_states + (hidden_state,)
UpperCamelCase__ : List[str] =stage_module(lowercase_ )
if output_hidden_states:
UpperCamelCase__ : Optional[Any] =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase_ , hidden_states=lowercase_ , )
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ResNetConfig
SCREAMING_SNAKE_CASE_ = 'resnet'
SCREAMING_SNAKE_CASE_ = 'pixel_values'
SCREAMING_SNAKE_CASE_ = True
def _lowerCAmelCase ( self : str , lowercase_ : Optional[int] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowerCAmelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Dict=False ):
if isinstance(lowercase_ , lowercase_ ):
UpperCamelCase__ : str =value
_SCREAMING_SNAKE_CASE : int = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_SCREAMING_SNAKE_CASE : Optional[int] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.', snake_case__, )
class __a ( snake_case__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowercase_ : List[Any] ):
super().__init__(lowercase_ )
UpperCamelCase__ : Dict =config
UpperCamelCase__ : str =ResNetEmbeddings(lowercase_ )
UpperCamelCase__ : str =ResNetEncoder(lowercase_ )
UpperCamelCase__ : Union[str, Any] =nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
UpperCamelCase__ : Union[str, Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Optional[Any] =self.embedder(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
UpperCamelCase__ : int =encoder_outputs[0]
UpperCamelCase__ : List[Any] =self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', snake_case__, )
class __a ( snake_case__ ):
"""simple docstring"""
def __init__( self : Dict , lowercase_ : Union[str, Any] ):
super().__init__(lowercase_ )
UpperCamelCase__ : Any =config.num_labels
UpperCamelCase__ : Dict =ResNetModel(lowercase_ )
# classification head
UpperCamelCase__ : Any =nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
UpperCamelCase__ : Dict =return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : List[Any] =self.resnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
UpperCamelCase__ : Tuple =outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase__ : Union[str, Any] =self.classifier(lowercase_ )
UpperCamelCase__ : int =None
if labels is not None:
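            # infer the problem type when it is unset: num_labels == 1 means
            # regression, integer labels mean single-label classification,
            # anything else falls back to multi-label classification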
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase__ : List[str] ='''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase__ : Dict ='''single_label_classification'''
else:
UpperCamelCase__ : str ='''multi_label_classification'''
if self.config.problem_type == "regression":
UpperCamelCase__ : Union[str, Any] =MSELoss()
if self.num_labels == 1:
UpperCamelCase__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase__ : Dict =loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase__ : List[Any] =CrossEntropyLoss()
UpperCamelCase__ : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase__ : Optional[Any] =BCEWithLogitsLoss()
UpperCamelCase__ : List[str] =loss_fct(lowercase_ , lowercase_ )
if not return_dict:
UpperCamelCase__ : Tuple =(logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ', snake_case__, )
class __a ( snake_case__, snake_case__ ):
"""simple docstring"""
def __init__( self : str , lowercase_ : List[Any] ):
super().__init__(lowercase_ )
super()._init_backbone(lowercase_ )
UpperCamelCase__ : str =[config.embedding_size] + config.hidden_sizes
UpperCamelCase__ : Optional[int] =ResNetEmbeddings(lowercase_ )
UpperCamelCase__ : Dict =ResNetEncoder(lowercase_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC )
def _lowerCAmelCase ( self : int , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
UpperCamelCase__ : Union[str, Any] =return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : Union[str, Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : Any =self.embedder(lowercase_ )
UpperCamelCase__ : Optional[Any] =self.encoder(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
UpperCamelCase__ : str =outputs.hidden_states
UpperCamelCase__ : Optional[int] =()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCamelCase__ : int =(feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowercase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowercase_ , )
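# A minimal inference sketch for the classification head defined above,
# assuming the standard exported names (`ResNetForImageClassification`,
# `AutoImageProcessor`) and the checkpoint from the docstring constants.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, ResNetForImageClassification
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    image = Image.new("RGB", (224, 224))  # stand-in for a real photo
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])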
| 357 |
"""simple docstring"""
import argparse
import os
import re
_SCREAMING_SNAKE_CASE : List[str] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_SCREAMING_SNAKE_CASE : Optional[int] = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : Any = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : List[str] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_SCREAMING_SNAKE_CASE : str = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(r"""\[([^\]]+)\]""")
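# Illustrative sanity checks for the patterns above (the `_re_*` names are
# the originals, inferred from their call sites further down in this file):
#   _re_indent.search("    foo").groups()[0]                     -> "    "
#   _re_direct_key.search('    "models": [').groups()[0]         -> "models"
#   _re_indirect_key.search('    _import_structure["models"]').groups()[0] -> "models"
#   _re_strip_line.search('    "AutoModel",').groups()[0]        -> "AutoModel"
#   _re_bracket_content.search("[a, b, c]").groups()[0]          -> "a, b, c"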
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : str =_re_indent.search(UpperCAmelCase )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]="" , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ : int =0
UpperCamelCase__ : Union[str, Any] =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(UpperCAmelCase ):
index += 1
UpperCamelCase__ : Optional[int] =['''\n'''.join(lines[:index] )]
else:
UpperCamelCase__ : List[Any] =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ : Dict =[lines[index]]
index += 1
while index < len(UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(UpperCAmelCase ) )
if index < len(UpperCAmelCase ) - 1:
UpperCamelCase__ : Optional[Any] =[lines[index + 1]]
index += 1
else:
UpperCamelCase__ : List[str] =[]
else:
blocks.append('''\n'''.join(UpperCAmelCase ) )
UpperCamelCase__ : List[Any] =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCAmelCase ) > 0:
blocks.append('''\n'''.join(UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCAmelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( UpperCAmelCase : str ):
'''simple docstring'''
def _inner(UpperCAmelCase : Dict ):
return key(UpperCAmelCase ).lower().replace('''_''' , '''''' )
return _inner
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Dict=None ):
'''simple docstring'''
def noop(UpperCAmelCase : Optional[Any] ):
return x
if key is None:
UpperCamelCase__ : int =noop
    # Constants are all uppercase; they go first.
UpperCamelCase__ : List[str] =[obj for obj in objects if key(UpperCAmelCase ).isupper()]
    # Classes are not all uppercase but start with a capital letter; they go second.
UpperCamelCase__ : Dict =[obj for obj in objects if key(UpperCAmelCase )[0].isupper() and not key(UpperCAmelCase ).isupper()]
    # Functions begin with a lowercase letter; they go last.
UpperCamelCase__ : int =[obj for obj in objects if not key(UpperCAmelCase )[0].isupper()]
UpperCamelCase__ : Optional[int] =ignore_underscore(UpperCAmelCase )
return sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase )
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
def _replace(UpperCAmelCase : Union[str, Any] ):
UpperCamelCase__ : List[str] =match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] ) + "]"
UpperCamelCase__ : List[Any] =import_statement.split('''\n''' )
if len(UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase__ : List[str] =2 if lines[1].strip() == '''[''' else 1
UpperCamelCase__ : List[str] =[(i, _re_strip_line.search(UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase__ : List[str] =sort_objects(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )
UpperCamelCase__ : Tuple =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase__ : Dict =_re_bracket_content.sub(_replace , lines[1] )
else:
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
UpperCamelCase__ : Optional[Any] =get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] )
return "\n".join(UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase__ : List[str] =_re_bracket_content.sub(_replace , UpperCAmelCase )
return import_statement
def _lowerCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=True ):
'''simple docstring'''
with open(UpperCAmelCase , '''r''' ) as f:
UpperCamelCase__ : int =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase__ : Optional[int] =split_code_in_indented_blocks(
UpperCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase__ : Dict =main_blocks[block_idx]
UpperCamelCase__ : List[str] =block.split('''\n''' )
# Get to the start of the imports.
UpperCamelCase__ : str =0
while line_idx < len(UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase__ : Optional[int] =len(UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[line_idx:-1] )
UpperCamelCase__ : Tuple =get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
UpperCamelCase__ : str =split_code_in_indented_blocks(UpperCAmelCase , indent_level=UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase__ : str =_re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase__ : Tuple =[(pattern.search(UpperCAmelCase ).groups()[0] if pattern.search(UpperCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase__ : List[Any] =[(i, key) for i, key in enumerate(UpperCAmelCase ) if key is not None]
UpperCamelCase__ : Optional[Any] =[x[0] for x in sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )]
        # We reorder the blocks, leaving empty lines/comments where they were and sorting the rest.
UpperCamelCase__ : Union[str, Any] =0
UpperCamelCase__ : str =[]
for i in range(len(UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase__ : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCAmelCase ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write('''\n'''.join(UpperCAmelCase ) )
def _lowerCAmelCase ( UpperCAmelCase : Dict=True ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =[]
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
UpperCamelCase__ : List[Any] =sort_imports(os.path.join(UpperCAmelCase , '''__init__.py''' ) , check_only=UpperCAmelCase )
if result:
UpperCamelCase__ : int =[os.path.join(UpperCAmelCase , '''__init__.py''' )]
if len(UpperCAmelCase ) > 0:
raise ValueError(F'''Would overwrite {len(UpperCAmelCase )} files, run `make style`.''' )
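# The ordering rule implemented by the sorter above, shown standalone:
# UPPERCASE constants first, then CamelCase classes, then lowercase
# functions, each bucket sorted case-insensitively with underscores
# ignored. All names here are illustrative.
def _sort_objects_demo(objects):
    key = lambda s: s.lower().replace("_", "")
    constants = sorted([o for o in objects if o.isupper()], key=key)
    classes = sorted([o for o in objects if o[0].isupper() and not o.isupper()], key=key)
    functions = sorted([o for o in objects if not o[0].isupper()], key=key)
    return constants + classes + functions
# _sort_objects_demo(["load_model", "MODEL_NAMES", "AutoModel"])
# -> ["MODEL_NAMES", "AutoModel", "load_model"]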
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 157 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
return EnvironmentCommand()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
return EnvironmentCommand(args.accelerate_config_file )
class A_ ( SCREAMING_SNAKE_CASE ):
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : ArgumentParser):
__lowerCamelCase : List[str] = parser.add_parser('env')
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__)
download_parser.add_argument(
'--accelerate-config_file' ,default=SCREAMING_SNAKE_CASE__ ,help='The accelerate config file to use for the default values in the launching script.' ,)
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__)
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,*SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : Union[str, Any] = accelerate_config_file
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = 'not installed'
if is_safetensors_available():
import safetensors
__lowerCamelCase : Tuple = safetensors.__version__
elif importlib.util.find_spec('safetensors') is not None:
import safetensors
__lowerCamelCase : Union[str, Any] = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
__lowerCamelCase : str = 'not installed'
__lowerCamelCase : Optional[int] = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__lowerCamelCase : Optional[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[Any] = load_config_from_file(self._accelerate_config_file).to_dict()
__lowerCamelCase : Optional[int] = (
'\n'.join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
else F"\t{accelerate_config}"
)
__lowerCamelCase : Dict = 'not installed'
__lowerCamelCase : Union[str, Any] = 'NA'
if is_torch_available():
import torch
__lowerCamelCase : str = torch.__version__
__lowerCamelCase : str = torch.cuda.is_available()
__lowerCamelCase : Any = 'not installed'
__lowerCamelCase : Tuple = 'NA'
if is_tf_available():
import tensorflow as tf
__lowerCamelCase : Optional[Any] = tf.__version__
try:
# deprecated in v2.1
__lowerCamelCase : int = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__lowerCamelCase : Tuple = bool(tf.config.list_physical_devices('GPU'))
__lowerCamelCase : Union[str, Any] = 'not installed'
__lowerCamelCase : int = 'not installed'
__lowerCamelCase : Optional[Any] = 'not installed'
__lowerCamelCase : Any = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
__lowerCamelCase : Any = flax.__version__
__lowerCamelCase : Optional[Any] = jax.__version__
__lowerCamelCase : Union[str, Any] = jaxlib.__version__
__lowerCamelCase : Any = jax.lib.xla_bridge.get_backend().platform
__lowerCamelCase : List[Any] = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F"{safetensors_version}",
'Accelerate version': F"{accelerate_version}",
'Accelerate config': F"{accelerate_config_str}",
'PyTorch version (GPU?)': F"{pt_version} ({pt_cuda_available})",
'Tensorflow version (GPU?)': F"{tf_version} ({tf_cuda_available})",
'Flax version (CPU?/GPU?/TPU?)': F"{flax_version} ({jax_backend})",
'Jax version': F"{jax_version}",
'JaxLib version': F"{jaxlib_version}",
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(SCREAMING_SNAKE_CASE__))
return info
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : Any):
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 73 |
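# The `env` command above probes each optional backend before importing it. A
# minimal sketch of that probing pattern (the `probe` helper is my own name,
# not part of the snippet):
import importlib.util

def probe(pkg: str) -> str:
    if importlib.util.find_spec(pkg) is None:
        return "not installed"
    module = importlib.import_module(pkg)
    return getattr(module, "__version__", "unknown")

print(f"- safetensors: {probe('safetensors')}")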
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , _UpperCAmelCase ).groups()[0]
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Union[str, Any], lowerCAmelCase : Tuple, lowerCAmelCase : Tuple=None, lowerCAmelCase : List[Any]=None ) -> Optional[Any]:
lowercase : str = file_names
lowercase : Optional[Any] = image_transform
lowercase : int = label_to_id
def __len__( self : List[Any] ) -> Any:
return len(self.file_names )
def __getitem__( self : str, lowerCAmelCase : Optional[int] ) -> Optional[Any]:
lowercase : List[Any] = self.file_names[idx]
lowercase : Tuple = PIL.Image.open(lowerCAmelCase )
lowercase : Tuple = raw_image.convert('RGB' )
if self.image_transform is not None:
lowercase : Optional[Any] = self.image_transform(lowerCAmelCase )
lowercase : Any = extract_label(lowerCAmelCase )
if self.label_to_id is not None:
lowercase : List[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if args.with_tracking:
lowercase : Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowercase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Union[str, Any] = config['lr']
lowercase : Any = int(config['num_epochs'] )
lowercase : Union[str, Any] = int(config['seed'] )
lowercase : List[Any] = int(config['batch_size'] )
lowercase : str = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
lowercase : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowercase : Dict = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowercase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowercase : Tuple = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowercase : Optional[int] = os.path.split(_UpperCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
lowercase : Optional[Any] = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowercase : str = [extract_label(_UpperCAmelCase ) for fname in file_names]
lowercase : List[Any] = list(set(_UpperCAmelCase ) )
id_to_label.sort()
lowercase : Optional[Any] = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
lowercase : List[Any] = np.random.permutation(len(_UpperCAmelCase ) )
lowercase : Optional[Any] = int(0.8 * len(_UpperCAmelCase ) )
lowercase : int = random_perm[:cut]
lowercase : Any = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowercase : Dict = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowercase : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
lowercase : List[Any] = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
lowercase : List[str] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
lowercase : Dict = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
lowercase : Any = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : List[Any] = create_model('resnet50d' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Union[str, Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowercase : Dict = False
for param in model.get_classifier().parameters():
lowercase : Dict = True
# We normalize the batches of images to be a bit faster.
lowercase : int = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowercase : Dict = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowercase : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowercase : List[Any] = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase : Tuple = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowercase : Any = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowercase : List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowercase : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowercase : Any = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
lowercase : List[Any] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowercase : List[Any] = None
else:
lowercase : Optional[Any] = int(training_difference.replace('step_' , '' ) )
lowercase : int = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
lowercase : str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowercase : Any = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowercase : Union[str, Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : List[str] = (batch['image'] - mean) / std
lowercase : Union[str, Any] = model(_UpperCAmelCase )
lowercase : Optional[int] = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : Union[str, Any] = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowercase : Optional[Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
lowercase : int = 0
lowercase : List[Any] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : Optional[Any] = (batch['image'] - mean) / std
with torch.no_grad():
lowercase : int = model(_UpperCAmelCase )
lowercase : Tuple = outputs.argmax(dim=-1 )
lowercase , lowercase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowercase : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowercase : List[str] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_UpperCAmelCase ),
'epoch': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
lowercase : str = f'''epoch_{epoch}'''
if args.output_dir is not None:
lowercase : Any = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def lowercase__ ( ) -> Tuple:
'''simple docstring'''
lowercase : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_UpperCAmelCase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
        '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_UpperCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=_UpperCAmelCase , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
lowercase : int = parser.parse_args()
lowercase : List[Any] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 255 | 0 |
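# The training script above resumes from checkpoint folders named `epoch_{i}`
# or `step_{i}`. A de-obfuscated sketch of that name parsing (function and
# variable names are mine; the script further converts the raw step into an
# (epoch, in-epoch step) pair using len(train_dataloader)):
import os

def parse_checkpoint_name(path: str) -> tuple:
    stem = os.path.splitext(os.path.basename(path))[0]
    if "epoch" in stem:
        return int(stem.replace("epoch_", "")) + 1, None  # (starting_epoch, resume_step)
    return 0, int(stem.replace("step_", ""))

assert parse_checkpoint_name("output/epoch_4") == (5, None)
assert parse_checkpoint_name("output/step_120") == (0, 120)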
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : list )-> list:
_lowerCamelCase = len(snake_case )
for i in range(1 , snake_case ):
_lowerCamelCase = collection[i]
_lowerCamelCase = 0
_lowerCamelCase = i - 1
while low <= high:
_lowerCamelCase = (low + high) // 2
if val < collection[mid]:
_lowerCamelCase = mid - 1
else:
_lowerCamelCase = mid + 1
for j in range(snake_case , snake_case , -1 ):
_lowerCamelCase = collection[j - 1]
_lowerCamelCase = val
return collection
if __name__ == "__main__":
A_ : List[Any] =input("""Enter numbers separated by a comma:\n""").strip()
A_ : Optional[int] =[int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 80 |
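# A de-obfuscated, runnable sketch of the binary insertion sort above
# (variable names are mine; the logic mirrors the snippet):
def binary_insertion_sort(collection: list) -> list:
    for i in range(1, len(collection)):
        value = collection[i]
        low, high = 0, i - 1
        # Binary-search the sorted prefix for the insertion point.
        while low <= high:
            mid = (low + high) // 2
            if value < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail right by one slot and insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value
    return collection

assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]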
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
A_ : Any =None
A_ : Optional[int] =logging.get_logger(__name__)
A_ : List[str] ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
A_ : List[Any] ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
A_ : Any ={
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
A_ : Union[str, Any] ="""▁"""
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : str = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ : int = BarthezTokenizer
def __init__( self , a__=None , a__=None , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , **a__ , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCamelCase = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , **a__ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = False if not self.vocab_file else True
def snake_case_ ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self , a__ , a__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , a__ , a__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
| 80 | 1 |
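# The BARThez tokenizer above builds RoBERTa-style special-token layouts:
# a single sequence becomes `<s> A </s>` and a pair `<s> A </s></s> B </s>`.
# An illustration with placeholder tokens (token strings stand in for ids):
cls, sep = ["<s>"], ["</s>"]
single = cls + ["A"] + sep
pair = cls + ["A"] + sep + sep + ["B"] + sep
assert pair == ["<s>", "A", "</s>", "</s>", "B", "</s>"]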
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase__ , unittest.TestCase):
UpperCAmelCase__ : Dict = XLNetTokenizer
UpperCAmelCase__ : List[str] = XLNetTokenizerFast
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Union[str, Any] = True
def lowercase_ ( self :Any ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A = XLNetTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
__A = '<s>'
__A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def lowercase_ ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1_006 )
def lowercase_ ( self :Dict ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase_ ( self :Any ) -> Dict:
'''simple docstring'''
__A = XLNetTokenizer(_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE )
__A = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [285, 46, 10, 170, 382] )
__A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__A = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__A = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowercase_ ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__A = XLNetTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
__A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
def lowercase_ ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
__A = XLNetTokenizer(_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE )
__A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def lowercase_ ( self :str ) -> Dict:
'''simple docstring'''
__A = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
__A = tokenizer.encode('sequence builders' , add_special_tokens=_SCREAMING_SNAKE_CASE )
__A = tokenizer.encode('multi-sequence build' , add_special_tokens=_SCREAMING_SNAKE_CASE )
__A = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
__A = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowercase_ ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
__A = {'input_ids': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
| 161 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def __UpperCamelCase ( _A : Dict=None ) ->Dict:
"""simple docstring"""
if subparsers is not None:
lowerCamelCase_ =subparsers.add_parser("""tpu-config""" , description=_description )
else:
lowerCamelCase_ =argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
lowerCamelCase_ =parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_A , default=_A , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_A , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_A , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
lowerCamelCase_ =parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_A , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def __UpperCamelCase ( _A : Tuple ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_A ):
lowerCamelCase_ =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase_ =defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase_ =defaults.commands
if not args.tpu_name:
lowerCamelCase_ =defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase_ =defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase_ ="""git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
lowerCamelCase_ ="""accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , _A ):
lowerCamelCase_ =f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
lowerCamelCase_ =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _A ):
lowerCamelCase_ =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase_ =["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowerCamelCase_ ="""; """.join(_A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase_ =["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(_A )}' )
return
subprocess.run(_A )
print("""Successfully setup pod.""" )
def __UpperCamelCase ( ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tpu_command_parser()
lowerCamelCase_ =parser.parse_args()
tpu_command_launcher(_A )
| 154 | 0 |
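# The tpu-config command above joins the pod commands with "; " and ships them
# over `gcloud ... tpu-vm ssh`. A sketch of the assembled invocation (the TPU
# name and zone are placeholders, not values from the snippet):
commands = ["cd /usr/share", "pip install accelerate -U", "accelerate launch train.py"]
cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
    "--zone", "us-central1-a",
    "--command", "; ".join(commands),
    "--worker", "all",
]
print(" ".join(cmd))  # roughly what the --debug flag prints instead of running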
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase__( ):
lowercase_ : List[Any] = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=__SCREAMING_SNAKE_CASE , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def lowercase__( ):
lowercase_ : str = parse_args()
# Import training_script as a module.
lowercase_ : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase_ : List[str] = script_fpath.stem
lowercase_ : Dict = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
lowercase_ : Tuple = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 358 |
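# The launcher above patches sys.argv and fans the training module's `_mp_fn`
# out over TPU cores. A hypothetical invocation (the script name and flags are
# illustrative only):
#   python xla_spawn.py --num_cores 8 train.py --output_dir /tmp/out
# which reduces to roughly:
#   sys.argv = ["train.py", "--output_dir", "/tmp/out", "--tpu_num_cores", "8"]
#   xmp.spawn(train._mp_fn, args=(), nprocs=8)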
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : int = set_counts
lowercase_ : List[Any] = max(__UpperCamelCase )
lowercase_ : Union[str, Any] = len(__UpperCamelCase )
lowercase_ : Dict = [1] * num_sets
lowercase_ : Optional[int] = list(range(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase )
lowercase_ : int = self.get_parent(__UpperCamelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase_ : Tuple = 0
lowercase_ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase_ : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase_ : str = 0
lowercase_ : Tuple = src_parent
lowercase_ : int = self.set_counts[src_parent]
lowercase_ : str = max(self.max_set ,__UpperCamelCase )
return True
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 321 | 0 |
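# A de-obfuscated sketch of the union-by-rank structure above, which also
# tracks per-set element counts and the largest set (names are mine):
class DisjointSets:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def get_parent(self, x: int) -> int:
        if self.parents[x] == x:
            return x
        self.parents[x] = self.get_parent(self.parents[x])  # path compression
        return self.parents[x]

    def merge(self, src: int, dst: int) -> bool:
        src, dst = self.get_parent(src), self.get_parent(dst)
        if src == dst:
            return False
        if self.ranks[dst] < self.ranks[src]:
            src, dst = dst, src  # keep the higher-ranked root as dst
        if self.ranks[dst] == self.ranks[src]:
            self.ranks[dst] += 1
        self.set_counts[dst] += self.set_counts[src]
        self.set_counts[src] = 0
        self.parents[src] = dst
        self.max_set = max(self.max_set, self.set_counts[dst])
        return True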
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A__ ( A__ ):
A__ = 'autoformer'
A__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Dict , _a : Optional[int] = None , _a : Optional[int] = None , _a : str = "student_t" , _a : str = "nll" , _a : int = 1 , _a : List[int] = [1, 2, 3, 4, 5, 6, 7] , _a : bool = True , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : Optional[List[int]] = None , _a : Optional[List[int]] = None , _a : int = 64 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 32 , _a : int = 32 , _a : str = "gelu" , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : int = 100 , _a : float = 0.02 , _a : bool = True , _a : Dict=True , _a : int = 10 , _a : int = 25 , _a : int = 3 , **_a : str , ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =prediction_length
_SCREAMING_SNAKE_CASE =context_length if context_length is not None else prediction_length
_SCREAMING_SNAKE_CASE =distribution_output
_SCREAMING_SNAKE_CASE =loss
_SCREAMING_SNAKE_CASE =input_size
_SCREAMING_SNAKE_CASE =num_time_features
_SCREAMING_SNAKE_CASE =lags_sequence
_SCREAMING_SNAKE_CASE =scaling
_SCREAMING_SNAKE_CASE =num_dynamic_real_features
_SCREAMING_SNAKE_CASE =num_static_real_features
_SCREAMING_SNAKE_CASE =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =cardinality
else:
_SCREAMING_SNAKE_CASE =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =embedding_dimension
else:
_SCREAMING_SNAKE_CASE =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_SCREAMING_SNAKE_CASE =num_parallel_samples
# Transformer architecture configuration
_SCREAMING_SNAKE_CASE =input_size * len(self.lags_sequence ) + self._number_of_features
_SCREAMING_SNAKE_CASE =d_model
_SCREAMING_SNAKE_CASE =encoder_attention_heads
_SCREAMING_SNAKE_CASE =decoder_attention_heads
_SCREAMING_SNAKE_CASE =encoder_ffn_dim
_SCREAMING_SNAKE_CASE =decoder_ffn_dim
_SCREAMING_SNAKE_CASE =encoder_layers
_SCREAMING_SNAKE_CASE =decoder_layers
_SCREAMING_SNAKE_CASE =dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =encoder_layerdrop
_SCREAMING_SNAKE_CASE =decoder_layerdrop
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =init_std
_SCREAMING_SNAKE_CASE =use_cache
# Autoformer
_SCREAMING_SNAKE_CASE =label_length
_SCREAMING_SNAKE_CASE =moving_average
_SCREAMING_SNAKE_CASE =autocorrelation_factor
super().__init__(is_encoder_decoder=_a , **_a )
@property
def A ( self : Any ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 47 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( A__ ):
A__ = 'time_series_transformer'
A__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[int] , _a : Optional[int] = None , _a : Optional[int] = None , _a : str = "student_t" , _a : str = "nll" , _a : int = 1 , _a : List[int] = [1, 2, 3, 4, 5, 6, 7] , _a : Optional[Union[str, bool]] = "mean" , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : Optional[List[int]] = None , _a : Optional[List[int]] = None , _a : int = 32 , _a : int = 32 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : bool = True , _a : str = "gelu" , _a : int = 64 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : int = 100 , _a : float = 0.02 , _a : Union[str, Any]=True , **_a : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =prediction_length
_SCREAMING_SNAKE_CASE =context_length or prediction_length
_SCREAMING_SNAKE_CASE =distribution_output
_SCREAMING_SNAKE_CASE =loss
_SCREAMING_SNAKE_CASE =input_size
_SCREAMING_SNAKE_CASE =num_time_features
_SCREAMING_SNAKE_CASE =lags_sequence
_SCREAMING_SNAKE_CASE =scaling
_SCREAMING_SNAKE_CASE =num_dynamic_real_features
_SCREAMING_SNAKE_CASE =num_static_real_features
_SCREAMING_SNAKE_CASE =num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =cardinality
else:
_SCREAMING_SNAKE_CASE =[0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
_SCREAMING_SNAKE_CASE =embedding_dimension
else:
_SCREAMING_SNAKE_CASE =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
_SCREAMING_SNAKE_CASE =num_parallel_samples
# Transformer architecture configuration
_SCREAMING_SNAKE_CASE =input_size * len(_a ) + self._number_of_features
_SCREAMING_SNAKE_CASE =d_model
_SCREAMING_SNAKE_CASE =encoder_attention_heads
_SCREAMING_SNAKE_CASE =decoder_attention_heads
_SCREAMING_SNAKE_CASE =encoder_ffn_dim
_SCREAMING_SNAKE_CASE =decoder_ffn_dim
_SCREAMING_SNAKE_CASE =encoder_layers
_SCREAMING_SNAKE_CASE =decoder_layers
_SCREAMING_SNAKE_CASE =dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =encoder_layerdrop
_SCREAMING_SNAKE_CASE =decoder_layerdrop
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =init_std
_SCREAMING_SNAKE_CASE =use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def A ( self : List[Any] ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 47 | 1 |
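# Both configs above size the transformer input as
# input_size * len(lags_sequence) + _number_of_features. An illustrative
# instantiation (all values arbitrary, chosen only to make the arithmetic
# concrete):
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[366],
    embedding_dimension=[4],
)
# _number_of_features = 4 (embedding) + 0 (dynamic real) + 2 (time)
#                     + 0 (static real) + 2 (log1p(abs(loc)), log(scale)) = 8
assert config._number_of_features == 8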
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowerCamelCase : Optional[Any] =["speech"]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
class SCREAMING_SNAKE_CASE ( metaclass=a_ ):
"""simple docstring"""
lowerCamelCase : List[Any] =["speech"]
def __init__( self : Union[str, Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Tuple ) -> str:
"""simple docstring"""
requires_backends(self , ["""speech"""] )
| 358 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : str=2 , lowerCAmelCase : Optional[Any]=56 , lowerCAmelCase : Any=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Any=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=99 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=2 , lowerCAmelCase : str=7 , lowerCAmelCase : List[Any]="gelu_new" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : Dict=16 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Union[str, Any]="block_sparse" , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : List[Any]=3 , ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : List[Any] = seq_length
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Union[str, Any] = use_attention_mask
__lowerCAmelCase : Tuple = use_token_type_ids
__lowerCAmelCase : Union[str, Any] = use_labels
__lowerCAmelCase : List[str] = vocab_size
__lowerCAmelCase : int = hidden_size
__lowerCAmelCase : Tuple = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : List[Any] = hidden_act
__lowerCAmelCase : Optional[Any] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : str = type_vocab_size
__lowerCAmelCase : List[Any] = type_sequence_label_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : str = num_choices
__lowerCAmelCase : Any = rescale_embeddings
__lowerCAmelCase : str = attention_type
__lowerCAmelCase : List[Any] = use_bias
__lowerCAmelCase : List[str] = block_size
__lowerCAmelCase : Union[str, Any] = num_random_blocks
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
"""simple docstring"""
__lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[Any] = None
if self.use_attention_mask:
__lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Any = None
if self.use_token_type_ids:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : List[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Union[str, Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCamelCase : List[str] =False
lowerCamelCase : Union[str, Any] =False
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
"""simple docstring"""
__lowerCAmelCase : str = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
super().test_hidden_states_output()
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase : List[str] = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]=None , **lowerCAmelCase : Union[str, Any] ):
return model(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
__lowerCAmelCase : str = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCAmelCase : List[Any] = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=1e-5 , lowerCAmelCase : Union[str, Any]="outputs" , lowerCAmelCase : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 139 | 0 |
def __UpperCamelCase ( _A : list[int] , _A : str ) ->list[int]:
"""simple docstring"""
lowerCamelCase_ =int(_A )
# Initialize Result
lowerCamelCase_ =[]
# Traverse through all denomination
for denomination in reversed(_A ):
# Find denominations
while int(_A ) >= int(_A ):
total_value -= int(_A )
answer.append(_A ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__A : Any = []
__A : Union[str, Any] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
__A : Any = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
__A : Any = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
__A : Tuple = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
__A : Dict = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F"""Following is minimal change for {value}: """)
__A : Dict = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 154 |
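# A de-obfuscated sketch of the greedy change-making above (names are mine;
# I sort explicitly instead of assuming ascending input). Greedy is optimal
# for canonical coin systems such as the INR denominations in the snippet,
# but not for arbitrary coin sets:
def find_minimum_change(denominations: list, value: str) -> list:
    total = int(value)
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total >= denomination:
            total -= denomination
            answer.append(denomination)
    return answer

assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]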
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : int = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Any = ["""vqvae"""]
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ , mel=snake_case__ , vqvae=snake_case__ )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , snake_case__ ) else 1000
@torch.no_grad()
def __call__( self , snake_case__ = 1 , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
UpperCAmelCase : str =steps or self.get_default_steps()
self.scheduler.set_timesteps(snake_case__ )
UpperCAmelCase : int =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCAmelCase : List[Any] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCAmelCase : int =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=snake_case__ , device=self.device , )
UpperCAmelCase : int =noise
UpperCAmelCase : Optional[int] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(snake_case__ , snake_case__ )
UpperCAmelCase : Tuple =self.mel.audio_slice_to_image(snake_case__ )
UpperCAmelCase : Any =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
UpperCAmelCase : Optional[int] =(input_image / 255) * 2 - 1
UpperCAmelCase : int =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCAmelCase : Optional[Any] =self.vqvae.encode(torch.unsqueeze(snake_case__ , 0 ) ).latent_dist.sample(
generator=snake_case__ )[0]
UpperCAmelCase : int =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCAmelCase : Union[str, Any] =self.scheduler.add_noise(snake_case__ , snake_case__ , self.scheduler.timesteps[start_step - 1] )
UpperCAmelCase : List[str] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCAmelCase : Tuple =int(mask_start_secs * pixels_per_second )
UpperCAmelCase : List[str] =int(mask_end_secs * pixels_per_second )
UpperCAmelCase : Tuple =self.scheduler.add_noise(snake_case__ , snake_case__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , snake_case__ ):
UpperCAmelCase : Dict =self.unet(snake_case__ , snake_case__ , snake_case__ )['''sample''']
else:
UpperCAmelCase : str =self.unet(snake_case__ , snake_case__ )['''sample''']
if isinstance(self.scheduler , snake_case__ ):
UpperCAmelCase : Optional[int] =self.scheduler.step(
model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , eta=snake_case__ , generator=snake_case__ , )['''prev_sample''']
else:
UpperCAmelCase : str =self.scheduler.step(
model_output=snake_case__ , timestep=snake_case__ , sample=snake_case__ , generator=snake_case__ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
UpperCAmelCase : int =mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCAmelCase : Tuple =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
UpperCAmelCase : List[str] =1 / self.vqvae.config.scaling_factor * images
UpperCAmelCase : Any =self.vqvae.decode(snake_case__ )['''sample''']
UpperCAmelCase : Optional[Any] =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : Optional[int] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCAmelCase : Dict =(images * 255).round().astype('''uint8''' )
UpperCAmelCase : Optional[Any] =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
UpperCAmelCase : List[Any] =[self.mel.image_to_audio(snake_case__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(snake_case__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(snake_case__ ) )
@torch.no_grad()
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , snake_case__ )
self.scheduler.set_timesteps(snake_case__ )
UpperCAmelCase : List[Any] =np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
UpperCAmelCase : Dict =(sample / 255) * 2 - 1
UpperCAmelCase : List[str] =torch.Tensor(snake_case__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCAmelCase : str =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCAmelCase : int =self.scheduler.alphas_cumprod[t]
UpperCAmelCase : List[Any] =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCAmelCase : List[str] =1 - alpha_prod_t
UpperCAmelCase : List[Any] =self.unet(snake_case__ , snake_case__ )['''sample''']
UpperCAmelCase : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCAmelCase : Union[str, Any] =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCAmelCase : List[str] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase__ ( snake_case__ , snake_case__ , snake_case__ ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase : Dict =acos(torch.dot(torch.flatten(snake_case__ ) , torch.flatten(snake_case__ ) ) / torch.norm(snake_case__ ) / torch.norm(snake_case__ ) )
return sin((1 - alpha) * theta ) * xa / sin(snake_case__ ) + sin(alpha * theta ) * xa / sin(snake_case__ )
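# The static helper above is a spherical linear interpolation (slerp) whose
# names were scrambled; a readable, self-contained sketch of the same formula
# (a restatement for clarity, not a new API):
from math import acos, sin

import torch


def slerp_sketch(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened latents on the unit sphere
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    # weight each endpoint by the sine of its share of the angle
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)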
| 369 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
__lowerCamelCase : Union[str, Any] = """CLIPImageProcessor"""
__lowerCamelCase : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case__ , )
UpperCAmelCase : int =kwargs.pop('''feature_extractor''' )
UpperCAmelCase : Tuple =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase : List[Any] =self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
UpperCAmelCase : Tuple =self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
UpperCAmelCase : List[Any] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.tokenizer.model_input_names
UpperCAmelCase : Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case__ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case__ , )
return self.image_processor
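# A minimal usage sketch of the processor above, assuming it is the
# CLIPProcessor shipped with transformers and that the public
# "openai/clip-vit-base-patch32" checkpoint is reachable (both assumptions
# about the surrounding library, not guarantees of this snippet):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
# `inputs` now carries input_ids, attention_mask and pixel_values in one encoding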
| 78 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : str=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=9_9 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Any=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=5_1_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]="None" , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Any=None , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict ) -> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = DebertaVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = DebertaVaForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaVaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaVaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = DebertaVaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = DebertaVaForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : Dict = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any = True
snake_case__ : Any = False
snake_case__ : Union[str, Any] = False
snake_case__ : List[Any] = False
snake_case__ : Any = False
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE = DebertaVaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : str ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
pass
@slow
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 54 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )
def _a ( ):
lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
lowerCamelCase : List[str] = map(lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase, """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
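# get_duration is imported from a local utils module that is not shown here; a
# plausible minimal implementation -- an assumption about its behavior, not the
# actual helper -- times a call and returns the elapsed wall-clock seconds:
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds, of the kind serialized into the results JSON above
    return wrapper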
| 287 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
"""simple docstring"""
UpperCamelCase = LlamaModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ )
UpperCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = LlamaModel(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , )
UpperCamelCase = model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> str:
"""simple docstring"""
UpperCamelCase = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = LlamaForCausalLM(config=A_ )
model.to(A_ )
model.eval()
# first forward pass
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0]
UpperCamelCase = model(
A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) = config_and_inputs
UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : str = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__lowercase : str = (LlamaForCausalLM,) if is_torch_available() else ()
__lowercase : Any = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : int = False
__lowercase : Optional[int] = False
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = LlamaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'single_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'multi_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(A_ )
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase = LlamaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = LlamaModel(A_ )
original_model.to(A_ )
original_model.eval()
UpperCamelCase = original_model(A_ ).last_hidden_state
UpperCamelCase = original_model(A_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = {'type': scaling_type, 'factor': 10.0}
UpperCamelCase = LlamaModel(A_ )
scaled_model.to(A_ )
scaled_model.eval()
UpperCamelCase = scaled_model(A_ ).last_hidden_state
UpperCamelCase = scaled_model(A_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) )
@require_torch
class lowercase ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
UpperCamelCase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCamelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
UpperCamelCase = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
UpperCamelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
UpperCamelCase = model(torch.tensor(A_ ) )
# Expected mean on dim = -1
UpperCamelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
UpperCamelCase = model(torch.tensor(A_ ) )
UpperCamelCase = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 )
# fmt: off
UpperCamelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 )
@unittest.skip('Model is currently gated' )
@slow
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
UpperCamelCase = 'Simply put, the theory of relativity states that '
UpperCamelCase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
UpperCamelCase = tokenizer.encode(A_ , return_tensors='pt' )
UpperCamelCase = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=A_ )
# greedy generation outputs
UpperCamelCase = model.generate(A_ , max_new_tokens=64 , top_p=A_ , temperature=1 , do_sample=A_ )
UpperCamelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
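# The scaling test above drives RoPE extension through the model config; a
# short sketch of the same knob from user code, assuming the released
# LlamaConfig accepts a rope_scaling dict with "type" and "factor" keys as the
# test suggests:
from transformers import LlamaConfig

long_context_config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})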
| 110 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Any = CTRLTokenizer
__lowercase : Any = False
__lowercase : Union[str, Any] = False
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
UpperCamelCase = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
UpperCamelCase = {'unk_token': '<unk>'}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A_ ) )
def __UpperCamelCase ( self , **A_ ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 'adapt react readapt apt'
UpperCamelCase = 'adapt react readapt apt'
return input_text, output_text
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = 'adapt react readapt apt'
UpperCamelCase = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
UpperCamelCase = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
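# The "@@" suffix in the toy vocabulary marks a BPE piece that continues into
# the next symbol, which is why "react" tokenizes to "re@@ a@@ c@@ t" above;
# detokenization is then just stripping the continuation markers:
bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
assert " ".join(bpe_tokens).replace("@@ ", "") == "adapt react readapt apt"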
| 110 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''CLIPFeatureExtractor''']
_snake_case = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
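# _LazyModule defers the heavy framework imports above until a symbol is first
# touched; a stripped-down sketch of the idea (an illustration of the pattern,
# not the transformers helper itself):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol back to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._symbol_to_submodule[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass the import machinery
        return value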
| 250 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> int:
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
# create an imagenet -> id dictionary for easier use
a : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
a : int = int(lowerCAmelCase__ )
a : Any = dict(sorted(self.labels.items() ) )
def __a ( self , lowerCAmelCase__ ) -> List[int]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[Any] = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 50 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
a : Dict = len(lowerCAmelCase__ )
a : Tuple = self.transformer.config.sample_size
a : Tuple = self.transformer.config.in_channels
a : Optional[int] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
a : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
a : List[str] = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 )
a : str = torch.tensor([1000] * batch_size , device=self.device )
a : Dict = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
a : Any = latent_model_input[: len(lowerCAmelCase__ ) // 2]
a : Tuple = torch.cat([half, half] , dim=0 )
a : List[str] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = t
if not torch.is_tensor(lowerCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
a : Optional[int] = latent_model_input.device.type == "mps"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Union[str, Any] = torch.floataa if is_mps else torch.floataa
else:
a : str = torch.intaa if is_mps else torch.intaa
a : List[str] = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
a : List[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
a : Dict = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
a : Union[str, Any] = self.transformer(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
a, a : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
a, a : Union[str, Any] = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 )
a : Dict = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
a : Optional[int] = torch.cat([half_eps, half_eps] , dim=0 )
a : Optional[int] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
a, a : str = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 )
else:
a : Any = noise_pred
# compute previous image: x_t -> x_t-1
a : Optional[int] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
if guidance_scale > 1:
a, a : Tuple = latent_model_input.chunk(2 , dim=0 )
else:
a : Tuple = latent_model_input
a : Optional[Any] = 1 / self.vae.config.scaling_factor * latents
a : Any = self.vae.decode(lowerCAmelCase__ ).sample
a : Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a : Optional[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a : int = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
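# A usage sketch for the class-conditional pipeline above, assuming it is the
# DiTPipeline shipped with diffusers and that the public
# "facebook/DiT-XL-2-256" checkpoint is available:
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
class_ids = pipe.get_label_ids(["golden retriever"])  # the imagenet label -> id lookup defined above
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=torch.manual_seed(0)).images[0]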
| 105 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
# Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 |
'''simple docstring'''
from math import isqrt
def lowercase (_A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def lowercase (_A = 1_0**6 ):
"""simple docstring"""
_lowerCAmelCase : str = 0
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
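# The candidates 7, 19, 37, 61, ... walked by the loop above are exactly the
# differences of consecutive cubes, (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1,
# and successive terms differ by 6 * k, which is what `prime_candidate += 6 * cube_index`
# exploits. A quick sanity check:
assert [(k + 1) ** 3 - k ** 3 for k in range(1, 5)] == [7, 19, 37, 61]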
| 25 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowercase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[float] = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
a__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Whether to SortishSamler or not."""} )
a__ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
a__ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """whether to use adafactor"""} )
a__ : Optional[float] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
a__ : Optional[float] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
a__ : Optional[float] = field(default=UpperCAmelCase_ , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
a__ : Optional[float] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
a__ : Optional[str] = field(
default="""linear""" , metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 43 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__lowercase = (720, 1280) # Height, Width
__lowercase = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__lowercase = 1 / 100
__lowercase = ''''''
__lowercase = ''''''
__lowercase = ''''''
__lowercase = 250
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCamelCase :List[Any] = random_chars(32 )
__UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__UpperCamelCase :Optional[Any] = []
for anno in new_annos:
__UpperCamelCase :int = anno[3] - anno[1]
__UpperCamelCase :Optional[int] = anno[4] - anno[2]
__UpperCamelCase :int = anno[1] + width / 2
__UpperCamelCase :List[str] = anno[2] + height / 2
__UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
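    # Each YOLO label file holds one "class x_center y_center width height" line per
    # object (all values normalized); the loop below converts these to corner
    # coordinates [class, xmin, ymin, xmax, ymax] and pairs each file with its image.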
__UpperCamelCase :str = []
__UpperCamelCase :str = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
__UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
__UpperCamelCase :str = in_file.readlines()
__UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
__UpperCamelCase :int = []
for obj_list in obj_lists:
__UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
__UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , ):
'''simple docstring'''
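    # Mosaic augmentation: pick a random split point inside the output canvas, resize
    # four source images into the resulting quadrants, and remap every normalized
    # bounding box into the coordinate frame of the combined image.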
__UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
__UpperCamelCase :Any = int(scale_y * output_size[0] )
__UpperCamelCase :List[str] = []
__UpperCamelCase :Dict = []
for i, index in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = all_annos[index]
__UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
if i == 0: # top-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
__UpperCamelCase :Union[str, Any] = img
for bbox in img_annos:
__UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = bbox[2] * scale_y
__UpperCamelCase :int = bbox[3] * scale_x
__UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCamelCase :List[str] = img
for bbox in img_annos:
__UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Dict = bbox[2] * scale_y
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Tuple = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Tuple = bbox[3] * scale_x
__UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCamelCase :Optional[int] = cva.resize(
SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__UpperCamelCase :List[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCamelCase :Optional[Any] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 43 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ : Dict = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Tuple = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 37 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 42
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int = 65_536 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : str = "fourier" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase_ : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Tuple[int] = (32, 32, 64) , UpperCAmelCase_ : str = None , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = False , ):
"""simple docstring"""
super().__init__()
__UpperCAmelCase : str = sample_size
# time
if time_embedding_type == "fourier":
__UpperCAmelCase : int = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase_ , log=UpperCAmelCase_ , flip_sin_to_cos=UpperCAmelCase_ )
__UpperCAmelCase : str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
__UpperCAmelCase : str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase_ , downscale_freq_shift=UpperCAmelCase_ )
__UpperCAmelCase : Dict = block_out_channels[0]
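        # Note: the Fourier projection concatenates sin and cos features, so it yields
        # twice as many channels (2 * block_out_channels[0]) as the positional embedding.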
if use_timestep_embedding:
__UpperCAmelCase : Union[str, Any] = block_out_channels[0] * 4
__UpperCAmelCase : str = TimestepEmbedding(
in_channels=UpperCAmelCase_ , time_embed_dim=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , out_dim=block_out_channels[0] , )
__UpperCAmelCase : Tuple = nn.ModuleList([] )
__UpperCAmelCase : int = None
__UpperCAmelCase : Optional[Any] = nn.ModuleList([] )
__UpperCAmelCase : Dict = None
# down
__UpperCAmelCase : str = in_channels
for i, down_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = output_channel
__UpperCAmelCase : Optional[int] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
__UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : List[str] = get_down_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
__UpperCAmelCase : Optional[Any] = get_mid_block(
UpperCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase_ , add_downsample=UpperCAmelCase_ , )
# up
__UpperCAmelCase : Tuple = list(reversed(UpperCAmelCase_ ) )
__UpperCAmelCase : Any = reversed_block_out_channels[0]
if out_block_type is None:
__UpperCAmelCase : Union[str, Any] = out_channels
else:
__UpperCAmelCase : Dict = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
__UpperCAmelCase : int = output_channel
__UpperCAmelCase : str = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase_ ) - 1 else final_upsample_channels
)
__UpperCAmelCase : Tuple = i == len(UpperCAmelCase_ ) - 1
__UpperCAmelCase : Dict = get_up_block(
UpperCAmelCase_ , num_layers=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = output_channel
# out
__UpperCAmelCase : Optional[int] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
__UpperCAmelCase : List[Any] = get_out_block(
out_block_type=UpperCAmelCase_ , num_groups_out=UpperCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase_ , act_fn=UpperCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
__UpperCAmelCase : Dict = timestep
if not torch.is_tensor(UpperCAmelCase_ ):
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(sample.device )
__UpperCAmelCase : List[str] = self.time_proj(UpperCAmelCase_ )
if self.config.use_timestep_embedding:
__UpperCAmelCase : Any = self.time_mlp(UpperCAmelCase_ )
else:
__UpperCAmelCase : Any = timestep_embed[..., None]
__UpperCAmelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
__UpperCAmelCase : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
__UpperCAmelCase : int = ()
for downsample_block in self.down_blocks:
__UpperCAmelCase , __UpperCAmelCase : int = downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
__UpperCAmelCase : List[str] = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
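            # Pop the most recent skip connection off the stack for this up block.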
__UpperCAmelCase : Any = down_block_res_samples[-1:]
__UpperCAmelCase : List[Any] = down_block_res_samples[:-1]
__UpperCAmelCase : str = upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_ )
# 5. post-process
if self.out_block:
__UpperCAmelCase : Tuple = self.out_block(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase_ )
| 37 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip_text_model'
def __init__( self , lowercase=250002 , lowercase=1024 , lowercase=24 , lowercase=16 , lowercase=4096 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=514 , lowercase=1 , lowercase=0.02 , lowercase=0.02 , lowercase=1e-05 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=768 , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = initializer_factor
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = project_dim
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip_vision_model'
def __init__( self , lowercase=768 , lowercase=3072 , lowercase=512 , lowercase=12 , lowercase=12 , lowercase=3 , lowercase=224 , lowercase=32 , lowercase="quick_gelu" , lowercase=1e-5 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , **lowercase , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase )
A__ = hidden_size
A__ = intermediate_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = initializer_factor
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def UpperCamelCase ( cls , lowercase , **lowercase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
A__ , A__ = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
A__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'altclip'
__lowerCamelCase = True
def __init__( self , lowercase=None , lowercase=None , lowercase=768 , lowercase=2.6592 , **lowercase ) -> List[str]:
'''simple docstring'''
A__ = kwargs.pop("text_config_dict" , lowercase )
A__ = kwargs.pop("vision_config_dict" , lowercase )
super().__init__(**lowercase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
            # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A__ = {}
# This is the complete result when using `text_config_dict`.
A__ = AltCLIPTextConfig(**lowercase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A__ = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(lowercase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A__ = {}
# This is the complete result when using `vision_config_dict`.
A__ = AltCLIPVisionConfig(**lowercase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A__ = {
str(lowercase ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A__ = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(lowercase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A__ = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
A__ = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
A__ = AltCLIPTextConfig(**lowercase )
A__ = AltCLIPVisionConfig(**lowercase )
A__ = projection_dim
A__ = logit_scale_init_value
A__ = 1.0
@classmethod
def UpperCamelCase ( cls , lowercase , lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 68 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCAmelCase__ = logging.getLogger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
A__ = label_idx
def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
'''simple docstring'''
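        # CoNLL-style input: one token per line with whitespace-separated columns,
        # sentences separated by blank lines, and documents introduced by -DOCSTART-.
        # The label is taken from column `label_idx` (the last column by default).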
if isinstance(lowercase , lowercase ):
A__ = mode.value
A__ = os.path.join(lowercase , F'{mode}.txt' )
A__ = 1
A__ = []
with open(lowercase , encoding="utf-8" ) as f:
A__ = []
A__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
guid_index += 1
A__ = []
A__ = []
else:
A__ = line.split(" " )
words.append(splits[0] )
if len(lowercase ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
return examples
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(lowercase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(lowercase )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(label_idx=-2 )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class a__ ( snake_case ):
"""simple docstring"""
def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
'''simple docstring'''
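        # CoNLL-U input: `parse_incr` streams one parsed sentence at a time, and each
        # token exposes its surface form ("form") and universal POS tag ("upos").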
if isinstance(lowercase , lowercase ):
A__ = mode.value
A__ = os.path.join(lowercase , F'{mode}.txt' )
A__ = 1
A__ = []
with open(lowercase , encoding="utf-8" ) as f:
for sentence in parse_incr(lowercase ):
A__ = []
A__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(lowercase ) == len(lowercase )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
guid_index += 1
return examples
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
A__ = 0
for sentence in parse_incr(lowercase ):
A__ = preds_list[example_id]
A__ = ""
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase )
example_id += 1
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 68 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __a:
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=36 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1_000 ,) -> List[Any]:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Dict = image_size
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : Any = text_seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : List[Any] = use_input_mask
UpperCAmelCase_ : Optional[Any] = use_token_type_ids
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Dict = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : int = coordinate_size
UpperCAmelCase_ : Union[str, Any] = shape_size
UpperCAmelCase_ : List[str] = num_labels
UpperCAmelCase_ : List[str] = num_choices
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase_ : List[Any] = text_seq_length
UpperCAmelCase_ : Optional[Any] = (image_size // patch_size) ** 2 + 1
UpperCAmelCase_ : Optional[int] = self.text_seq_length + self.image_seq_length
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
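        # (each box is (x0, y0, x1, y1); swap coordinates where needed so the first
        # corner is the top-left and the second the bottom-right)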
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : int = bbox[i, j, 3]
UpperCAmelCase_ : List[Any] = bbox[i, j, 1]
UpperCAmelCase_ : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : Any = bbox[i, j, 2]
UpperCAmelCase_ : Dict = bbox[i, j, 0]
UpperCAmelCase_ : Dict = t
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Dict = None
if self.use_input_mask:
UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Optional[int] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
UpperCAmelCase_ : int = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : str = LayoutLMvaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# text + image
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model(
_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase_ : Dict = model(pixel_values=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : List[Any] = LayoutLMvaForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Tuple = model(
_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : Optional[int] = self.num_labels
UpperCAmelCase_ : List[str] = LayoutLMvaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : str = model(
_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : int = LayoutLMvaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Dict = model(
_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,start_positions=_SCREAMING_SNAKE_CASE ,end_positions=_SCREAMING_SNAKE_CASE ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = LayoutLMvaModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = copy.deepcopy(_SCREAMING_SNAKE_CASE )
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = {
k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
if isinstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
elif model_class in [
*get_values(_SCREAMING_SNAKE_CASE ),
]:
UpperCAmelCase_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
elif model_class in [
*get_values(_SCREAMING_SNAKE_CASE ),
]:
UpperCAmelCase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE ,)
return inputs_dict
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Dict = type
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
@slow
def a__ ( self ) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Dict = LayoutLMvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __a( unittest.TestCase ):
"""simple docstring"""
@cached_property
def a__ ( self ) -> int:
return LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE ) if is_vision_available() else None
@slow
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.default_image_processor
UpperCAmelCase_ : int = prepare_img()
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = torch.tensor([[1, 2]] )
UpperCAmelCase_ : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
UpperCAmelCase_ : Optional[int] = model(
input_ids=input_ids.to(_SCREAMING_SNAKE_CASE ) ,bbox=bbox.to(_SCREAMING_SNAKE_CASE ) ,pixel_values=pixel_values.to(_SCREAMING_SNAKE_CASE ) ,)
# verify the logits
UpperCAmelCase_ : Dict = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
| 352 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
lowerCAmelCase = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
lowerCAmelCase = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
lowerCAmelCase = field(
default=1_0000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    lowerCAmelCase = field(default=2E-4 , metadata={'''help''': '''Learning rate for training.'''} )
    lowerCAmelCase = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
lowerCAmelCase = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
lowerCAmelCase = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
lowerCAmelCase = field(default=5_0000 , metadata={'''help''': '''Maximum number of training steps.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Training seed.'''} )
lowerCAmelCase = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
lowerCAmelCase = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
lowerCAmelCase = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
lowerCAmelCase = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
lowerCAmelCase = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
lowerCAmelCase = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
lowerCAmelCase = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
lowerCAmelCase = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
lowerCAmelCase = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
lowerCAmelCase = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
lowerCAmelCase = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
lowerCAmelCase = field(
        default='''eval_results.json''' , metadata={'''help''': '''File in which to save evaluation results.'''} )
lowerCAmelCase = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
lowerCAmelCase = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default=_a , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
lowerCAmelCase = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
lowerCAmelCase = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
lowerCAmelCase = field(
default=10_0000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
lowerCAmelCase = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
lowerCAmelCase = field(
default=_a , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
lowerCAmelCase = field(
default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
lowerCAmelCase = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
lowerCAmelCase = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
lowerCAmelCase = field(default=20_0000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
lowerCAmelCase = field(
        default=3_2768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
lowerCAmelCase = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
lowerCAmelCase = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
lowerCAmelCase = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
lowerCAmelCase = field(default=_a , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 235 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( a_ , unittest.TestCase ):
A__ : Any =None
A__ : str =BloomTokenizerFast
A__ : str =BloomTokenizerFast
A__ : Dict =True
A__ : str =False
A__ : List[Any] ="tokenizer_file"
A__ : Optional[Any] ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def A_ ( self : Dict ):
super().setUp()
SCREAMING_SNAKE_CASE__ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Tuple , **UpperCAmelCase_ : Any ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
SCREAMING_SNAKE_CASE__ = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
SCREAMING_SNAKE_CASE__ = tokenizer.batch_encode_plus(UpperCAmelCase_ )["""input_ids"""]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int , UpperCAmelCase_ : Optional[Any]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
SCREAMING_SNAKE_CASE__ = """This is a simple input"""
SCREAMING_SNAKE_CASE__ = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.encode(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
tokenizer_r.batch_encode_plus(UpperCAmelCase_ , max_length=UpperCAmelCase_ )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
SCREAMING_SNAKE_CASE__ = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Simple input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Simple input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' )
# Pair input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , )
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCAmelCase_ )
        SCREAMING_SNAKE_CASE__ = next(iter(UpperCAmelCase_ ) )["""premise"""] # pick up one sample
SCREAMING_SNAKE_CASE__ = list(sample_data.values() )
SCREAMING_SNAKE_CASE__ = list(map(tokenizer.encode , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ = [tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : int ):
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 176 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_A = getLogger(__name__)
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 8 , __UpperCAmelCase = 1024 , __UpperCAmelCase="val" , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase="summarization" , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase = None , __UpperCAmelCase="" , **__UpperCAmelCase , ) -> Dict:
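    # Each distributed rank loads the model onto its own GPU, generates for its shard
    # of the dataset, and dumps {"pred", "id"} records to rank_<local_rank>_output.json
    # so that the master process can gather and reorder the results afterwards.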
lowerCAmelCase__ : Any = str(__UpperCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" , rank=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = Path(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = save_dir.joinpath(f"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ).cuda()
if fpaa:
lowerCAmelCase__ : Optional[int] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__UpperCAmelCase , __UpperCAmelCase ) # update config with task specific params
lowerCAmelCase__ : Any = generate_kwargs.pop("""num_beams""" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowerCAmelCase__ : Tuple = num_return_sequences
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowerCAmelCase__ : List[str] = tokenizer.model_max_length
if prefix is None:
lowerCAmelCase__ : Optional[int] = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
lowerCAmelCase__ : Tuple = SeqaSeqDataset(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , max_target_length=1024 , type_path=__UpperCAmelCase , n_obs=__UpperCAmelCase , prefix=__UpperCAmelCase , **__UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowerCAmelCase__ : List[str] = ds.make_sortish_sampler(__UpperCAmelCase , distributed=__UpperCAmelCase , add_extra_examples=__UpperCAmelCase , shuffle=__UpperCAmelCase )
lowerCAmelCase__ : int = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn )
lowerCAmelCase__ : Optional[int] = []
for batch in tqdm(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) , attention_mask=batch["""attention_mask"""].to(model.device ) , num_return_sequences=__UpperCAmelCase , num_beams=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ : List[str] = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
lowerCAmelCase__ : Any = batch["""ids"""]
if num_return_sequences > 1:
lowerCAmelCase__ : Dict = chunks(__UpperCAmelCase , __UpperCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__UpperCAmelCase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return results, sampler.num_replicas
def lowercase_ ( ) -> str:
lowerCAmelCase__ : Tuple = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" , type=__UpperCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" , type=__UpperCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" , default="""sshleifer/distilbart-xsum-12-3""" , )
parser.add_argument("""--save_dir""" , type=__UpperCAmelCase , help="""where to save""" , default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" , type=__UpperCAmelCase , default=__UpperCAmelCase )
parser.add_argument(
"""--type_path""" , type=__UpperCAmelCase , default="""test""" , help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" , type=__UpperCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=__UpperCAmelCase , default=8 , required=__UpperCAmelCase , help="""batch size""" )
parser.add_argument(
"""--local_rank""" , type=__UpperCAmelCase , default=-1 , required=__UpperCAmelCase , help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" , type=__UpperCAmelCase , default=1 , required=__UpperCAmelCase , help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" , type=__UpperCAmelCase , default=600 , required=__UpperCAmelCase , help="""How long should master process wait for other processes to finish.""" , )
parser.add_argument("""--src_lang""" , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument("""--tgt_lang""" , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument(
"""--prefix""" , type=__UpperCAmelCase , required=__UpperCAmelCase , default=__UpperCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--debug""" , action="""store_true""" )
lowerCAmelCase__ : Union[str, Any] = time.time()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = parser.parse_known_args()
lowerCAmelCase__ : Union[str, Any] = parse_numeric_n_bool_cl_kwargs(__UpperCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(f"""parsed the following generate kwargs: {generate_kwargs}""" )
lowerCAmelCase__ : Tuple = Path(args.save_dir + """_tmp""" )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) # this handles locking.
lowerCAmelCase__ : Dict = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" )
    # In theory, a node could finish and save before another node hits this. If it does, we can address it later.
lowerCAmelCase__ : Optional[int] = {}
if args.src_lang is not None:
lowerCAmelCase__ : Optional[int] = args.src_lang
if args.tgt_lang is not None:
lowerCAmelCase__ : Tuple = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : int = eval_data_dir(
args.data_dir , __UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
if args.local_rank <= 0:
lowerCAmelCase__ : Tuple = Path(args.save_dir )
save_dir.mkdir(exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : Dict = gather_results_from_each_node(__UpperCAmelCase , __UpperCAmelCase , args.sync_timeout )
lowerCAmelCase__ : List[str] = combine_partial_results(__UpperCAmelCase )
if args.num_return_sequences > 1:
lowerCAmelCase__ : List[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return
lowerCAmelCase__ : str = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(__UpperCAmelCase ) as f:
lowerCAmelCase__ : Dict = [x.rstrip() for x in f.readlines()][: len(__UpperCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
lowerCAmelCase__ : Dict = """translation""" in args.task
lowerCAmelCase__ : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
lowerCAmelCase__ : List[str] = """bleu""" if calc_bleu else """rouge"""
lowerCAmelCase__ : Dict = score_fn(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : int = len(__UpperCAmelCase )
lowerCAmelCase__ : int = time.time() - start_time
lowerCAmelCase__ : Optional[int] = round(runtime / metrics["""n_obs"""] , 4 )
lowerCAmelCase__ : List[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
lowerCAmelCase__ : int = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" )
save_json(__UpperCAmelCase , __UpperCAmelCase , indent=__UpperCAmelCase )
print(__UpperCAmelCase )
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"""{args.type_path}.target""" ) )
else:
shutil.rmtree(__UpperCAmelCase )
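
# Flatten the per-rank partial result lists into one list of predictions, ordered by example id.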
def lowercase_ ( __UpperCAmelCase ) -> List:
lowerCAmelCase__ : List[str] = []
for partial_result in partial_results:
records.extend(__UpperCAmelCase )
lowerCAmelCase__ : Dict = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["id"] )
lowerCAmelCase__ : Optional[int] = [x["""pred"""] for x in records]
return preds
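
# Poll save_dir until every replica has written its rank_*.json file, then load them; raises TimeoutError otherwise.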
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
lowerCAmelCase__ : Dict = time.time()
logger.info("""waiting for all nodes to finish""" )
lowerCAmelCase__ : Tuple = None
while (time.time() - start_wait) < timeout:
lowerCAmelCase__ : Optional[Any] = list(save_dir.glob("""rank_*.json""" ) )
if len(__UpperCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
lowerCAmelCase__ : Dict = lmap(__UpperCAmelCase , __UpperCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
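    # e.g. (illustrative): python -m torch.distributed.launch --nproc_per_node=2 <this_script>.py --model_name facebook/wmt19-en-de --data_dir $DATA_DIR --save_dir tmp_gen --task translation --bs 16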
run_generate()
| 242 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__snake_case : Optional[int] = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
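
# Parse a pytest summary string (e.g. "=== 2 failed, 10 passed in 3.2s ===") into (failed, passed, time_spent).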
def __lowerCamelCase ( __snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Any =test_results.split(""" """ )
A__ : List[Any] =0
A__ : Optional[int] =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A__ : Dict =expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(__snake_case ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
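
# Map each failing doctest in the short failure report (lines matching "_ [doctest]") to its first error line.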
def __lowerCamelCase ( __snake_case : str ) -> Optional[int]:
"""simple docstring"""
A__ : Dict ={}
A__ : List[Any] =None
A__ : Any =False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""", __snake_case ):
A__ : List[str] =True
A__ : Optional[int] =line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
A__ : List[str] =line
A__ : int =False
return failures
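
# Builds and posts the Slack message (plus threaded per-job replies) summarizing the doc-test run.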
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Dict:
'''simple docstring'''
A__ : Any =title
A__ : List[Any] =doc_test_results["""time_spent"""].split(""",""" )[0]
A__ : str =doc_test_results["""success"""]
A__ : str =doc_test_results["""failures"""]
A__ : Optional[int] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A__ : List[Any] =doc_test_results
@property
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
A__ : List[str] =[self._time_spent]
A__ : str =0
for time in time_spent:
A__ : Union[str, Any] =time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase_ ) == 1:
A__ : str =[0, 0, time_parts[0]]
A__ , A__ , A__ : int =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A__ , A__ , A__ : Optional[int] =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"{int(lowerCAmelCase_ )}h{int(lowerCAmelCase_ )}m{int(lowerCAmelCase_ )}s"
@property
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =40
A__ : List[Any] ={k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )}
A__ : Union[str, Any] =""""""
for category, failures in category_failures.items():
if len(lowerCAmelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Tuple =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase_ )
@staticmethod
def lowercase__ ( ) -> Any:
'''simple docstring'''
A__ : Dict =[
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase_ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase_ , )
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
A__ : Tuple =f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else """All tests passed."""
A__ : Optional[int] =client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=lowerCAmelCase_ , )
def lowercase__ ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> Any:
'''simple docstring'''
A__ : int =""""""
for key, value in failures.items():
A__ : Optional[int] =value[:2_00] + """ [Truncated]""" if len(lowerCAmelCase_ ) > 2_50 else value
failures_text += f"*{key}*\n_{value}_\n\n"
A__ : List[Any] =job_name
A__ : Dict ={"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
A__ : Dict ={
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
A__ : Optional[Any] =self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
        A__ : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
A__ : Optional[Any] =f"*Num failures* :{len(job_result['failed'] )} \n"
A__ : Tuple =job_result["""failures"""]
A__ : Optional[Any] =self.get_reply_blocks(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text=lowerCAmelCase_ )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"Results for {job}" , blocks=lowerCAmelCase_ , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
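
# Query the GitHub Actions API (paginated) for this run and return a mapping of job name -> job URL.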
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[Any] =os.environ["""GITHUB_RUN_ID"""]
A__ : Tuple =f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
A__ : List[str] =requests.get(__snake_case ).json()
A__ : List[str] ={}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A__ : Optional[int] =math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__snake_case ):
A__ : List[Any] =requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""", __snake_case )
return {}
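
# Read every file in a downloaded artifact directory and return a mapping of filename -> contents.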
def __lowerCamelCase ( __snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
A__ : Any ={}
if os.path.exists(__snake_case ):
A__ : str =os.listdir(__snake_case )
for file in files:
try:
with open(os.path.join(__snake_case, __snake_case ), encoding="""utf-8""" ) as f:
A__ : Tuple =f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(__snake_case, __snake_case )}." ) from e
return _artifact
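
# Scan the working directory for artifact folders and index them by name as Artifact objects.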
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
class lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : str ) -> Any:
'''simple docstring'''
A__ : List[Any] =name
A__ : str =[]
def __str__( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return self.name
def lowercase__ ( self : Any , lowerCAmelCase_ : str ) -> Tuple:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
A__ : Dict[str, Artifact] ={}
A__ : int =filter(os.path.isdir, os.listdir() )
for directory in directories:
A__ : List[Any] =directory
if artifact_name not in _available_artifacts:
A__ : str =Artifact(__snake_case )
_available_artifacts[artifact_name].add_path(__snake_case )
return _available_artifacts
if __name__ == "__main__":
__snake_case : List[str] = get_job_links()
__snake_case : int = retrieve_available_artifacts()
__snake_case : Union[str, Any] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__snake_case : Dict = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__snake_case : List[Any] = github_actions_job_links.get('run_doctests')
__snake_case : Tuple = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
__snake_case : Optional[int] = retrieve_artifact(artifact_path['name'])
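    # Parse the stats report into pass/fail counts, then bucket each FAILED test from the short summary by doc category.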
if "stats" in artifact:
__snake_case , __snake_case , __snake_case : Optional[Any] = handle_test_results(artifact['stats'])
__snake_case : Optional[Any] = failed
__snake_case : Union[str, Any] = success
__snake_case : Union[str, Any] = time_spent[1:-1] + ', '
__snake_case : int = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
__snake_case : Optional[int] = line.replace('FAILED ', '')
__snake_case : str = line.split()[0].replace('\n', '')
if "::" in line:
__snake_case , __snake_case : Optional[Any] = line.split('::')
else:
__snake_case , __snake_case : Any = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__snake_case : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__snake_case : List[str] = all_failures[test] if test in all_failures else 'N/A'
__snake_case : Optional[Any] = failure
break
__snake_case : int = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 136 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
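
# Mixin used by the DeepFloyd IF pipeline tests: builds tiny deterministic components and checks save/load round trips.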
class lowerCamelCase :
'''simple docstring'''
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
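        # Tiny, seeded components for the base (text-to-image) IF pipeline.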
torch.manual_seed(0 )
A__ : str =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : int =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Union[str, Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Dict =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : Union[str, Any] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
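        # Tiny, seeded components for the IF super-resolution pipeline, which adds an image-noising scheduler.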
torch.manual_seed(0 )
A__ : List[Any] =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : str =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Dict =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Optional[int] =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : int =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A__ : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
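        # Round-trip test: pre-compute prompt embeddings, null out optional components, save/load, and compare outputs.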
A__ : Tuple =self.get_dummy_components()
A__ : str =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[int] =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : str =inputs["""prompt"""]
A__ : Optional[int] =inputs["""generator"""]
A__ : Optional[Any] =inputs["""num_inference_steps"""]
A__ : Union[str, Any] =inputs["""output_type"""]
if "image" in inputs:
A__ : Union[str, Any] =inputs["""image"""]
else:
A__ : List[Any] =None
if "mask_image" in inputs:
A__ : Union[str, Any] =inputs["""mask_image"""]
else:
A__ : Tuple =None
if "original_image" in inputs:
A__ : Optional[Any] =inputs["""original_image"""]
else:
A__ : Tuple =None
A__ , A__ : Optional[Any] =pipe.encode_prompt(lowerCAmelCase_ )
# inputs with prompt converted to embeddings
A__ : Optional[int] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : Tuple =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : int =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) is None , f"`{optional_component}` did not stay set to None after loading." , )
A__ : Dict =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : int =inputs["""generator"""]
A__ : str =inputs["""num_inference_steps"""]
A__ : Optional[Any] =inputs["""output_type"""]
# inputs with prompt converted to embeddings
A__ : List[Any] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : int =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
A__ : List[str] =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Union[str, Any] =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
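        # Round-trip test: outputs after save_pretrained/from_pretrained must match the original pipeline's outputs.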
A__ : Union[str, Any] =self.get_dummy_components()
A__ : int =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : List[Any] =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : List[Any] =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : Tuple =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Tuple =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
| 136 | 1 |